/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "qla_def.h"
#include "qla_target.h"

static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled on enabling target mode and then on disabling target mode "
	"enabled back; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"enabled\" (default) - initiator mode will always stay enabled.");
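/*
 * For example (illustrative only), a host used purely as a target port
 * would typically load the driver with initiator mode switched off:
 *
 *	modprobe qla2xxx qlini_mode=disabled
 *
 * while "exclusive" keeps initiator mode on until target mode is
 * enabled, and re-enables it when target mode is disabled again.
 */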
int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};

/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE		0	/* simple task attribute */
#define FCP_PTA_HEADQ		1	/* head of queue task attribute */
#define FCP_PTA_ORDERED		2	/* ordered task attribute */
#define FCP_PTA_ACA		4	/* auto. contingent allegiance */
#define FCP_PTA_MASK		7	/* mask for task attribute field */
#define FCP_PRI_SHIFT		3	/* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK	0x80	/* reserved bits in priority field */

/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation, at the time when
 * those functions are called:
 *
 *   - Either the context is IRQ and only the IRQ handler can modify HW data,
 *     including rings related fields,
 *
 *   - Or access to target mode variables from struct qla_tgt doesn't
 *     cross those functions' boundaries, except tgt_stop, which is
 *     additionally protected by irq_cmd_count.
 */
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt);
static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct scsi_qla_host *ha,
	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked);
static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
	struct qla_tgt_srr_imm *imm, int ha_lock);
/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);

/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
static struct qla_tgt_sess *qlt_find_sess_by_port_name(
	struct qla_tgt *tgt,
	const uint8_t *port_name)
{
	struct qla_tgt_sess *sess;

	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
		if (!memcmp(sess->port_name, port_name, WWN_SIZE))
			return sess;
	}

	return NULL;
}

/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);
		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}

static inline
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
	uint8_t *d_id)
{
	struct qla_hw_data *ha = vha->hw;
	uint8_t vp_idx;

	if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
		return NULL;

	if (vha->d_id.b.al_pa == d_id[2])
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}

static inline
struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
	uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}
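/*
 * ATIOs arrive on the physical port but may belong to any NPIV vport.
 * The two lookup helpers above resolve the owner either by the 24-bit
 * FC port ID (domain:area:al_pa, using al_pa to index tgt_vp_map) or by
 * the firmware's vp_index.  The dispatchers below pick the lookup that
 * matches what the incoming IOCB actually carries.
 */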
void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	ql_dbg(ql_dbg_tgt, vha, 0xe072,
		"%s: qla_target(%d): type %x ox_id %04x\n",
		__func__, vha->vp_idx, atio->u.raw.entry_type,
		be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id[0],
			    atio->u.isp24.fcp_hdr.d_id[1],
			    atio->u.isp24.fcp_hdr.d_id[2]);
			break;
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != 0xFFFF)) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return;
}

void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_CRC2:
		ql_dbg(ql_dbg_tgt, vha, 0xe073,
			"qla_target(%d):%s: CRC2 Response pkt\n",
			vha->vp_idx, __func__);
		/* fall through */
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		if (0xFF != entry->u.isp24.vp_index) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RESP_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	default:
		qlt_response_pkt(vha, pkt);
		break;
	}

}
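/*
 * Session teardown runs from process context: qlt_unreg_sess() queues
 * qlt_free_session_done(), which lets the fabric module release its
 * se_session before the qla_tgt_sess itself is freed and any waiter in
 * qlt_stop_phase1() is woken once sess_count drops to zero.
 */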
static void qlt_free_session_done(struct work_struct *work)
{
	struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;

	BUG_ON(!tgt);
	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
	    "Unregistration of sess %p finished\n", sess);

	kfree(sess);
	/*
	 * We need to protect against a race where tgt is freed before or
	 * inside wake_up().
	 */
	tgt->sess_count--;
	if (tgt->sess_count == 0)
		wake_up_all(&tgt->waitQ);
}

/* ha->hardware_lock supposed to be held on entry */
void qlt_unreg_sess(struct qla_tgt_sess *sess)
{
	struct scsi_qla_host *vha = sess->vha;

	vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	list_del(&sess->sess_list_entry);
	if (sess->deleted)
		list_del(&sess->del_list_entry);

	INIT_WORK(&sess->free_work, qlt_free_session_done);
	schedule_work(&sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);

/* ha->hardware_lock supposed to be held on entry */
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	uint32_t unpacked_lun, lun = 0;
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
#if 0 /* FIXME: Re-enable Global event handling.. */
		/* Global event */
		atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
		qlt_clear_tgt_db(ha->tgt.qla_tgt, 1);
		if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
			sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
			    typeof(*sess), sess_list_entry);
			switch (mcmd) {
			case QLA_TGT_NEXUS_LOSS_SESS:
				mcmd = QLA_TGT_NEXUS_LOSS;
				break;
			case QLA_TGT_ABORT_ALL_SESS:
				mcmd = QLA_TGT_ABORT_ALL;
				break;
			case QLA_TGT_NEXUS_LOSS:
			case QLA_TGT_ABORT_ALL:
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe046,
				    "qla_target(%d): Not allowed "
				    "command %x in %s", vha->vp_idx,
				    mcmd, __func__);
				sess = NULL;
				break;
			}
		} else
			sess = NULL;
#endif
	} else {
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
	    mcmd, loop_id);

	lun = a->u.isp24.fcp_cmnd.lun;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
	    iocb, QLA24XX_MGMT_SEND_NACK);
}
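/*
 * A lost initiator is not dropped at once: deletion is normally
 * deferred by dev_loss_tmo = port_down_retry_count + 5 seconds, so a
 * port that bounces can reclaim its session via qlt_undelete_sess()
 * in qlt_fc_port_added().  Passing immediate == true collapses that
 * grace period to zero.
 */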
/* ha->hardware_lock supposed to be held on entry */
static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
	bool immediate)
{
	struct qla_tgt *tgt = sess->tgt;
	uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;

	if (sess->deleted)
		return;

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion\n", sess);
	list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
	sess->deleted = 1;

	if (immediate)
		dev_loss_tmo = 0;

	sess->expires = jiffies + dev_loss_tmo * HZ;

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
	    "qla_target(%d): session for port %8phC (loop ID %d) scheduled for "
	    "deletion in %u secs (expires: %lu) immed: %d\n",
	    sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo,
	    sess->expires, immediate);

	if (immediate)
		schedule_delayed_work(&tgt->sess_del_work, 0);
	else
		schedule_delayed_work(&tgt->sess_del_work,
		    sess->expires - jiffies);
}

/* ha->hardware_lock supposed to be held on entry */
static void qlt_clear_tgt_db(struct qla_tgt *tgt, bool local_only)
{
	struct qla_tgt_sess *sess;

	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
		qlt_schedule_sess_for_deletion(sess, true);

	/* At this point tgt could be already dead */
}

static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list;
	char *id_iter;
	int res, rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA Alloc failed of %u\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		res = -1;
		goto out_free_id_list;
	}

	id_iter = (char *)gid_list;
	res = -1;
	for (i = 0; i < entries; i++) {
		struct gid_list_info *gid = (struct gid_list_info *)id_iter;
		if ((gid->al_pa == s_id[2]) &&
		    (gid->area == s_id[1]) &&
		    (gid->domain == s_id[0])) {
			*loop_id = le16_to_cpu(gid->loop_id);
			res = 0;
			break;
		}
		id_iter += ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}

/* ha->hardware_lock supposed to be held on entry */
static void qlt_undelete_sess(struct qla_tgt_sess *sess)
{
	BUG_ON(!sess->deleted);

	list_del(&sess->del_list_entry);
	sess->deleted = 0;
}

static void qlt_del_sess_work_fn(struct delayed_work *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt,
	    sess_del_work);
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags, elapsed;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (!list_empty(&tgt->del_sess_list)) {
		sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
		    del_list_entry);
		elapsed = jiffies;
		if (time_after_eq(elapsed, sess->expires)) {
			qlt_undelete_sess(sess);

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
			    "Timeout: sess %p about to be deleted\n",
			    sess);
			ha->tgt.tgt_ops->shutdown_sess(sess);
			ha->tgt.tgt_ops->put_sess(sess);
		} else {
			schedule_delayed_work(&tgt->sess_del_work,
			    sess->expires - elapsed);
			break;
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
588 */ 589 static struct qla_tgt_sess *qlt_create_sess( 590 struct scsi_qla_host *vha, 591 fc_port_t *fcport, 592 bool local) 593 { 594 struct qla_hw_data *ha = vha->hw; 595 struct qla_tgt_sess *sess; 596 unsigned long flags; 597 unsigned char be_sid[3]; 598 599 /* Check to avoid double sessions */ 600 spin_lock_irqsave(&ha->hardware_lock, flags); 601 list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list, 602 sess_list_entry) { 603 if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) { 604 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005, 605 "Double sess %p found (s_id %x:%x:%x, " 606 "loop_id %d), updating to d_id %x:%x:%x, " 607 "loop_id %d", sess, sess->s_id.b.domain, 608 sess->s_id.b.al_pa, sess->s_id.b.area, 609 sess->loop_id, fcport->d_id.b.domain, 610 fcport->d_id.b.al_pa, fcport->d_id.b.area, 611 fcport->loop_id); 612 613 if (sess->deleted) 614 qlt_undelete_sess(sess); 615 616 kref_get(&sess->se_sess->sess_kref); 617 ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, 618 (fcport->flags & FCF_CONF_COMP_SUPPORTED)); 619 620 if (sess->local && !local) 621 sess->local = 0; 622 spin_unlock_irqrestore(&ha->hardware_lock, flags); 623 624 return sess; 625 } 626 } 627 spin_unlock_irqrestore(&ha->hardware_lock, flags); 628 629 sess = kzalloc(sizeof(*sess), GFP_KERNEL); 630 if (!sess) { 631 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a, 632 "qla_target(%u): session allocation failed, all commands " 633 "from port %8phC will be refused", vha->vp_idx, 634 fcport->port_name); 635 636 return NULL; 637 } 638 sess->tgt = vha->vha_tgt.qla_tgt; 639 sess->vha = vha; 640 sess->s_id = fcport->d_id; 641 sess->loop_id = fcport->loop_id; 642 sess->local = local; 643 644 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006, 645 "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n", 646 sess, vha->vha_tgt.qla_tgt); 647 648 be_sid[0] = sess->s_id.b.domain; 649 be_sid[1] = sess->s_id.b.area; 650 be_sid[2] = sess->s_id.b.al_pa; 651 /* 652 * Determine if this fc_port->port_name is allowed to access 653 * target mode using explict NodeACLs+MappedLUNs, or using 654 * TPG demo mode. If this is successful a target mode FC nexus 655 * is created. 656 */ 657 if (ha->tgt.tgt_ops->check_initiator_node_acl(vha, 658 &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) { 659 kfree(sess); 660 return NULL; 661 } 662 /* 663 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess 664 * access across ->hardware_lock reaquire. 665 */ 666 kref_get(&sess->se_sess->sess_kref); 667 668 sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED); 669 BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name)); 670 memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name)); 671 672 spin_lock_irqsave(&ha->hardware_lock, flags); 673 list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list); 674 vha->vha_tgt.qla_tgt->sess_count++; 675 spin_unlock_irqrestore(&ha->hardware_lock, flags); 676 677 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b, 678 "qla_target(%d): %ssession for wwn %8phC (loop_id %d, " 679 "s_id %x:%x:%x, confirmed completion %ssupported) added\n", 680 vha->vp_idx, local ? "local " : "", fcport->port_name, 681 fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area, 682 sess->s_id.b.al_pa, sess->conf_compl_supported ? 
"" : "not "); 683 684 return sess; 685 } 686 687 /* 688 * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port() 689 */ 690 void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) 691 { 692 struct qla_hw_data *ha = vha->hw; 693 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 694 struct qla_tgt_sess *sess; 695 unsigned long flags; 696 697 if (!vha->hw->tgt.tgt_ops) 698 return; 699 700 if (!tgt || (fcport->port_type != FCT_INITIATOR)) 701 return; 702 703 if (qla_ini_mode_enabled(vha)) 704 return; 705 706 spin_lock_irqsave(&ha->hardware_lock, flags); 707 if (tgt->tgt_stop) { 708 spin_unlock_irqrestore(&ha->hardware_lock, flags); 709 return; 710 } 711 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); 712 if (!sess) { 713 spin_unlock_irqrestore(&ha->hardware_lock, flags); 714 715 mutex_lock(&vha->vha_tgt.tgt_mutex); 716 sess = qlt_create_sess(vha, fcport, false); 717 mutex_unlock(&vha->vha_tgt.tgt_mutex); 718 719 spin_lock_irqsave(&ha->hardware_lock, flags); 720 } else { 721 kref_get(&sess->se_sess->sess_kref); 722 723 if (sess->deleted) { 724 qlt_undelete_sess(sess); 725 726 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c, 727 "qla_target(%u): %ssession for port %8phC " 728 "(loop ID %d) reappeared\n", vha->vp_idx, 729 sess->local ? "local " : "", sess->port_name, 730 sess->loop_id); 731 732 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007, 733 "Reappeared sess %p\n", sess); 734 } 735 ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, 736 (fcport->flags & FCF_CONF_COMP_SUPPORTED)); 737 } 738 739 if (sess && sess->local) { 740 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d, 741 "qla_target(%u): local session for " 742 "port %8phC (loop ID %d) became global\n", vha->vp_idx, 743 fcport->port_name, sess->loop_id); 744 sess->local = 0; 745 } 746 ha->tgt.tgt_ops->put_sess(sess); 747 spin_unlock_irqrestore(&ha->hardware_lock, flags); 748 } 749 750 void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport) 751 { 752 struct qla_hw_data *ha = vha->hw; 753 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 754 struct qla_tgt_sess *sess; 755 unsigned long flags; 756 757 if (!vha->hw->tgt.tgt_ops) 758 return; 759 760 if (!tgt || (fcport->port_type != FCT_INITIATOR)) 761 return; 762 763 spin_lock_irqsave(&ha->hardware_lock, flags); 764 if (tgt->tgt_stop) { 765 spin_unlock_irqrestore(&ha->hardware_lock, flags); 766 return; 767 } 768 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); 769 if (!sess) { 770 spin_unlock_irqrestore(&ha->hardware_lock, flags); 771 return; 772 } 773 774 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess); 775 776 sess->local = 1; 777 qlt_schedule_sess_for_deletion(sess, false); 778 spin_unlock_irqrestore(&ha->hardware_lock, flags); 779 } 780 781 static inline int test_tgt_sess_count(struct qla_tgt *tgt) 782 { 783 struct qla_hw_data *ha = tgt->ha; 784 unsigned long flags; 785 int res; 786 /* 787 * We need to protect against race, when tgt is freed before or 788 * inside wake_up() 789 */ 790 spin_lock_irqsave(&ha->hardware_lock, flags); 791 ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002, 792 "tgt %p, empty(sess_list)=%d sess_count=%d\n", 793 tgt, list_empty(&tgt->sess_list), tgt->sess_count); 794 res = (tgt->sess_count == 0); 795 spin_unlock_irqrestore(&ha->hardware_lock, flags); 796 797 return res; 798 } 799 800 /* Called by tcm_qla2xxx configfs code */ 801 int qlt_stop_phase1(struct qla_tgt *tgt) 802 { 803 struct scsi_qla_host *vha = tgt->vha; 804 struct qla_hw_data *ha = tgt->ha; 805 unsigned long flags; 806 807 mutex_lock(&qla_tgt_mutex); 808 if 
/* Called by tcm_qla2xxx configfs code */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	mutex_lock(&qla_tgt_mutex);
	if (!vha->fc_vport) {
		struct Scsi_Host *sh = vha->host;
		struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
		bool npiv_vports;

		spin_lock_irqsave(sh->host_lock, flags);
		npiv_vports = (fc_host->npiv_vports_inuse);
		spin_unlock_irqrestore(sh->host_lock, flags);

		if (npiv_vports) {
			mutex_unlock(&qla_tgt_mutex);
			return -EPERM;
		}
	}
	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		mutex_unlock(&qla_tgt_mutex);
		return -EPERM;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt, true);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&qla_tgt_mutex);

	flush_delayed_work(&tgt->sess_del_work);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: list_empty(sess_list)=%d "
	    "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
	    tgt->sess_count);

	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));

	/* Big hammer */
	if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
	return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);

/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	unsigned long flags;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
	    "Waiting for %d IRQ commands to complete (tgt %p)",
	    tgt->irq_cmd_count, tgt);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (tgt->irq_cmd_count != 0) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		udelay(2);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished",
	    tgt);
}
EXPORT_SYMBOL(qlt_stop_phase2);

/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;

	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	vha->vha_tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}
/* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;

	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_send_notify_ack(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (vha->vha_tgt.qla_tgt != NULL)
		vha->vha_tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			__constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	qla2x00_start_iocbs(vha, vha->req);
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x)\n",
	    ha, abts, status);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
	} else {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	qla2x00_start_iocbs(vha, vha->req);
}
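/*
 * When the firmware hands back the ABTS response we generated, the
 * s_id/d_id pair in it is already swapped; that is why the retry path
 * below terminates the exchange with a CTIO7 and then re-sends the
 * BA_ACC via qlt_24xx_send_abts_resp() with ids_reversed == true.
 */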
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct abts_resp_from_24xx_fw *entry)
{
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	/*
	 * On entry we have the firmware's response to the ABTS response
	 * that we generated, so the ID fields in it are reversed.
	 */

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
	ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
	ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	ctio->u.status1.flags =
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_TERMINATE);
	ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);

	qla2x00_start_iocbs(vha, vha->req);

	qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
	    FCP_TMF_CMPL, true);
}

/* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_session *se_sess = sess->se_sess;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct se_cmd *se_cmd;
	u32 lun = 0;
	int rc;
	bool found_lun = false;

	spin_lock(&se_sess->sess_cmd_lock);
	list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
		struct qla_tgt_cmd *cmd =
			container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
		if (cmd->tag == abts->exchange_addr_to_abort) {
			lun = cmd->unpacked_lun;
			found_lun = true;
			break;
		}
	}
	spin_unlock(&se_sess->sess_cmd_lock);

	if (!found_lun)
		return -ENOENT;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
	    "qla_target(%d): task abort (tag=%d)\n",
	    vha->vp_idx, abts->exchange_addr_to_abort);

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
	    abts->exchange_addr_to_abort);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
		    "qla_target(%d): tgt_ops->handle_tmr()"
		    " failed: %d", vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
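/*
 * Note that fcp_hdr_le in the ABTS IOCB carries the FC header in
 * little-endian layout, so the s_id bytes below are copied in reverse
 * order before the big-endian session lookup.  If no session exists
 * yet, the ABTS is queued to the tgt->sess_works_list path rather than
 * being rejected outright.
 */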
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	uint32_t tag = abts->exchange_addr_to_abort;
	uint8_t s_id[3];
	int rc;

	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
		    "qla_target(%d): ABTS: Abort Sequence not "
		    "supported\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
		    "qla_target(%d): ABTS: Unknown Exchange "
		    "Address received\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
	    abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
	    le32_to_cpu(abts->fcp_hdr_le.parameter));

	s_id[0] = abts->fcp_hdr_le.s_id[2];
	s_id[1] = abts->fcp_hdr_le.s_id[1];
	s_id[2] = abts->fcp_hdr_le.s_id[0];

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
		    "qla_target(%d): task abort for non-existent session\n",
		    vha->vp_idx);
		rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
		if (rc != 0) {
			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
			    false);
		}
		return;
	}

	rc = __qlt_24xx_handle_abts(vha, abts, sess);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
		    vha->vp_idx, rc);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
	struct ctio7_to_24xx *ctio;
	uint16_t temp;

	ql_dbg(ql_dbg_tgt, ha, 0xe008,
	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x)\n",
	    ha, atio, resp_code);

	/* Send marker if required */
	if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, ha, 0xe04c,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", ha->vp_idx, __func__);
		return;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->nport_handle = mcmd->sess->loop_id;
	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = ha->vp_idx;
	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_SEND_STATUS);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.ox_id = cpu_to_le16(temp);
	ctio->u.status1.scsi_status =
	    __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
	ctio->u.status1.response_len = __constant_cpu_to_le16(8);
	ctio->u.status1.sense_data[0] = resp_code;

	qla2x00_start_iocbs(ha, ha->req);
}

void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);

/* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
	    "TM response mcmd (%p) status %#x state %#x",
	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
		qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
		    0, 0, 0, 0, 0, 0);
	else {
		if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
			qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
			    mcmd->fc_tm_rsp, false);
		else
			qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
			    mcmd->fc_tm_rsp);
	}
	/*
	 * Make the callback for ->free_mcmd() to queue_work() and invoke
	 * target_put_sess_cmd() to drop cmd_kref to 1. The final
	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated
	 * se_cmd descriptor after TFO->queue_tm_rsp() ->
	 * tcm_qla2xxx_queue_tm_rsp() -> qlt_xmit_tm_rsp() returns here..
	 */
	ha->tgt.tgt_ops->free_mcmd(mcmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);
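/*
 * DMA mapping and IOCB sizing.  A CTIO7 holds only datasegs_per_cmd
 * data segments, so prm->req_cnt grows by one continuation entry per
 * datasegs_per_cont further segments.  For example (with the values
 * typically used for ISP24xx, datasegs_per_cmd = 1 and
 * datasegs_per_cont = 5), a 16-segment transfer needs
 * 1 + DIV_ROUND_UP(16 - 1, 5) = 4 request-ring entries.
 */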
/* No locks */
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd = prm->cmd;

	BUG_ON(cmd->sg_cnt == 0);

	prm->sg = (struct scatterlist *)cmd->sg;
	prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
	    cmd->sg_cnt, cmd->dma_data_direction);
	if (unlikely(prm->seg_cnt == 0))
		goto out_err;

	prm->cmd->sg_mapped = 1;

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
		/*
		 * If there are more sg entries than fit in the command
		 * IOCB, we need to allocate continuation entries.
		 */
		if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
			prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
			    prm->tgt->datasegs_per_cmd,
			    prm->tgt->datasegs_per_cont);
	} else {
		/* DIF */
		if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
		    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
			prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
			prm->tot_dsds = prm->seg_cnt;
		} else
			prm->tot_dsds = prm->seg_cnt;

		if (cmd->prot_sg_cnt) {
			prm->prot_sg = cmd->prot_sg;
			prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev,
			    cmd->prot_sg, cmd->prot_sg_cnt,
			    cmd->dma_data_direction);
			if (unlikely(prm->prot_seg_cnt == 0))
				goto out_err;

			if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
			    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
				/* DIF bundling is not supported here */
				prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
				    cmd->blk_sz);
				prm->tot_dsds += prm->prot_seg_cnt;
			} else
				prm->tot_dsds += prm->prot_seg_cnt;
		}
	}

	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
	    prm->seg_cnt, prm->req_cnt);
	return 0;

out_err:
	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
	    0, prm->cmd->sg_cnt);
	return -1;
}

static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha = vha->hw;

	BUG_ON(!cmd->sg_mapped);
	pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
	cmd->sg_mapped = 0;

	if (cmd->prot_sg_cnt)
		pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
		    cmd->dma_data_direction);

	if (cmd->ctx_dsd_alloced)
		qla2x00_clean_dsd_pool(ha, NULL, cmd);

	if (cmd->ctx)
		dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
}
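/*
 * Free-slot accounting for the request ring: vha->req->cnt caches the
 * number of free entries and is refreshed from the hardware out
 * pointer when it looks too small.  For example, with length = 2048,
 * ring_index = 100 and req_q_out = 90, the ring holds 10 outstanding
 * entries, so cnt = 2048 - (100 - 90) = 2038 free slots (two are
 * always kept in reserve by the "req_cnt + 2" check below).
 */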
static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
	uint32_t req_cnt)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ha->iobase;
	uint32_t cnt;

	if (vha->req->cnt < (req_cnt + 2)) {
		cnt = (uint16_t)RD_REG_DWORD(&reg->isp24.req_q_out);

		ql_dbg(ql_dbg_tgt, vha, 0xe00a,
		    "Request ring circled: cnt=%d, vha->req->ring_index=%d, "
		    "vha->req->cnt=%d, req_cnt=%d\n", cnt,
		    vha->req->ring_index, vha->req->cnt, req_cnt);
		if (vha->req->ring_index < cnt)
			vha->req->cnt = cnt - vha->req->ring_index;
		else
			vha->req->cnt = vha->req->length -
			    (vha->req->ring_index - cnt);
	}

	if (unlikely(vha->req->cnt < (req_cnt + 2))) {
		ql_dbg(ql_dbg_tgt, vha, 0xe00b,
		    "qla_target(%d): There is no room in the "
		    "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, "
		    "req_cnt=%d\n", vha->vp_idx, vha->req->ring_index,
		    vha->req->cnt, req_cnt);
		return -EAGAIN;
	}
	vha->req->cnt -= req_cnt;

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
{
	/* Adjust ring index. */
	vha->req->ring_index++;
	if (vha->req->ring_index == vha->req->length) {
		vha->req->ring_index = 0;
		vha->req->ring_ptr = vha->req->ring;
	} else {
		vha->req->ring_ptr++;
	}
	return (cont_entry_t *)vha->req->ring_ptr;
}

/* ha->hardware_lock supposed to be held on entry */
static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t h;

	h = ha->tgt.current_handle;
	/* always increment cmd handle */
	do {
		++h;
		if (h > DEFAULT_OUTSTANDING_COMMANDS)
			h = 1;	/* 0 is QLA_TGT_NULL_HANDLE */
		if (h == ha->tgt.current_handle) {
			ql_dbg(ql_dbg_tgt, vha, 0xe04e,
			    "qla_target(%d): Ran out of "
			    "empty cmd slots in ha %p\n", vha->vp_idx, ha);
			h = QLA_TGT_NULL_HANDLE;
			break;
		}
	} while ((h == QLA_TGT_NULL_HANDLE) ||
	    (h == QLA_TGT_SKIP_HANDLE) ||
	    (ha->tgt.cmds[h-1] != NULL));

	if (h != QLA_TGT_NULL_HANDLE)
		ha->tgt.current_handle = h;

	return h;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	uint32_t h;
	struct ctio7_to_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *atio = &prm->cmd->atio;
	uint16_t temp;

	pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	pkt->entry_type = CTIO_TYPE7;
	pkt->entry_count = (uint8_t)prm->req_cnt;
	pkt->vp_index = vha->vp_idx;

	h = qlt_make_handle(vha);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * A CTIO type 7 from the firmware doesn't provide a way
		 * to know the initiator's LOOP ID, hence we can't find
		 * the session and, therefore, the command.
		 */
		return -EAGAIN;
	} else
		ha->tgt.cmds[h-1] = prm->cmd;

	pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = prm->cmd->loop_id;
	pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr = atio->u.isp24.exchange_addr;
	pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	pkt->u.status0.ox_id = cpu_to_le16(temp);
	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

	ql_dbg(ql_dbg_tgt, vha, 0xe00c,
	    "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
	    vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT, temp);
	return 0;
}
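/*
 * The two helpers below scatter the mapped SG list into the CTIO and
 * its continuation packets: qlt_load_data_segments() fills the dseg
 * slots in the CTIO7 itself, then qlt_load_cont_data_segments()
 * consumes the remaining prm->seg_cnt entries, using CONTINUE_A64_TYPE
 * packets when 64-bit DMA addressing is enabled.
 */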
/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is a sufficient number of request entries, so it will not be
 * dropped.
 */
static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;

	/* Build continuation packets */
	while (prm->seg_cnt > 0) {
		cont_a64_entry_t *cont_pkt64 =
			(cont_a64_entry_t *)qlt_get_req_pkt(vha);

		/*
		 * Make sure that from cont_pkt64 none of the 64-bit
		 * specific fields are used for 32-bit addressing.
		 * Cast to (cont_entry_t *) for that.
		 */

		memset(cont_pkt64, 0, sizeof(*cont_pkt64));

		cont_pkt64->entry_count = 1;
		cont_pkt64->sys_define = 0;

		if (enable_64bit_addressing) {
			cont_pkt64->entry_type = CONTINUE_A64_TYPE;
			dword_ptr =
			    (uint32_t *)&cont_pkt64->dseg_0_address;
		} else {
			cont_pkt64->entry_type = CONTINUE_TYPE;
			dword_ptr =
			    (uint32_t *)&((cont_entry_t *)
				cont_pkt64)->dseg_0_address;
		}

		/* Load continuation entry data segments */
		for (cnt = 0;
		    cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
		    cnt++, prm->seg_cnt--) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_lo32
				(sg_dma_address(prm->sg)));
			if (enable_64bit_addressing) {
				*dword_ptr++ =
				    cpu_to_le32(pci_dma_hi32
					(sg_dma_address
					(prm->sg)));
			}
			*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

			ql_dbg(ql_dbg_tgt, vha, 0xe00d,
			    "S/G Segment Cont. phys_addr=%llx:%llx, len=%d\n",
			    (long long unsigned int)
			    pci_dma_hi32(sg_dma_address(prm->sg)),
			    (long long unsigned int)
			    pci_dma_lo32(sg_dma_address(prm->sg)),
			    (int)sg_dma_len(prm->sg));

			prm->sg = sg_next(prm->sg);
		}
	}
}
/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is a sufficient number of request entries, so it will not be
 * dropped.
 */
static void qlt_load_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;

	ql_dbg(ql_dbg_tgt, vha, 0xe00e,
	    "iocb->scsi_status=%x, iocb->flags=%x\n",
	    le16_to_cpu(pkt24->u.status0.scsi_status),
	    le16_to_cpu(pkt24->u.status0.flags));

	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);

	/* Setup packet address segment pointer */
	dword_ptr = pkt24->u.status0.dseg_0_address;

	/* Set total data segment count */
	if (prm->seg_cnt)
		pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);

	if (prm->seg_cnt == 0) {
		/* No data transfer */
		*dword_ptr++ = 0;
		*dword_ptr = 0;
		return;
	}

	/* If scatter gather */
	ql_dbg(ql_dbg_tgt, vha, 0xe00f, "%s", "Building S/G data segments...");

	/* Load command entry data segments */
	for (cnt = 0;
	    (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
	    cnt++, prm->seg_cnt--) {
		*dword_ptr++ =
		    cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
		if (enable_64bit_addressing) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_hi32(
				sg_dma_address(prm->sg)));
		}
		*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

		ql_dbg(ql_dbg_tgt, vha, 0xe010,
		    "S/G Segment phys_addr=%llx:%llx, len=%d\n",
		    (long long unsigned int)pci_dma_hi32(sg_dma_address(
		    prm->sg)),
		    (long long unsigned int)pci_dma_lo32(sg_dma_address(
		    prm->sg)),
		    (int)sg_dma_len(prm->sg));

		prm->sg = sg_next(prm->sg);
	}

	qlt_load_cont_data_segments(prm, vha);
}

static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
{
	return cmd->bufflen > 0;
}
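/*
 * qlt_pre_xmit_response() does everything that can be done without the
 * hardware lock: it maps the SG list, folds residual under/overflow
 * into the SCSI status, and computes *full_req_cnt, adding one extra
 * packet when status cannot ride in the same CTIO as the data.
 */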
/*
 * Called without ha->hardware_lock held
 */
static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
	struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
	uint32_t *full_req_cnt)
{
	struct qla_tgt *tgt = cmd->tgt;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd = &cmd->se_cmd;

	if (unlikely(cmd->aborted)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
		    "qla_target(%d): terminating exchange "
		    "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd,
		    se_cmd, cmd->tag);

		cmd->state = QLA_TGT_STATE_ABORTED;

		qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);

		/* !! At this point cmd could be already freed !! */
		return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u ox_id %04x\n",
	    vha->vp_idx, cmd->tag,
	    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));

	prm->cmd = cmd;
	prm->tgt = tgt;
	prm->rq_result = scsi_status;
	prm->sense_buffer = &cmd->sense_buffer[0];
	prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
	prm->sg = NULL;
	prm->seg_cnt = -1;
	prm->req_cnt = 1;
	prm->add_status_pkt = 0;

	ql_dbg(ql_dbg_tgt, vha, 0xe012, "rq_result=%x, xmit_type=%x\n",
	    prm->rq_result, xmit_type);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
		return -EFAULT;

	ql_dbg(ql_dbg_tgt, vha, 0xe013, "CTIO start: vha(%d)\n", vha->vp_idx);

	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
		if (qlt_pci_map_calc_cnt(prm) != 0)
			return -EAGAIN;
	}

	*full_req_cnt = prm->req_cnt;

	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_tgt, vha, 0xe014,
		    "Residual underflow: %d (tag %d, "
		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_UNDER;
	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_tgt, vha, 0xe015,
		    "Residual overflow: %d (tag %d, "
		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_OVER;
	}

	if (xmit_type & QLA_TGT_XMIT_STATUS) {
		/*
		 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
		 * ignored in *xmit_response() below
		 */
		if (qlt_has_data(cmd)) {
			if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
			    (IS_FWI2_CAPABLE(ha) &&
			    (prm->rq_result != 0))) {
				prm->add_status_pkt = 1;
				(*full_req_cnt)++;
			}
		}
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe016,
	    "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n",
	    prm->req_cnt, *full_req_cnt, prm->add_status_pkt);

	return 0;
}

static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
	struct qla_tgt_cmd *cmd, int sending_sense)
{
	if (ha->tgt.enable_class_2)
		return 0;

	if (sending_sense)
		return cmd->conf_compl_supported;
	else
		return ha->tgt.enable_explicit_conf &&
		    cmd->conf_compl_supported;
}

#ifdef CONFIG_QLA_TGT_DEBUG_SRR
/*
 * Original taken from the XFS code
 */
static unsigned long qlt_srr_random(void)
{
	static int Inited;
	static unsigned long RandomValue;
	static DEFINE_SPINLOCK(lock);
	/* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
	register long rv;
	register long lo;
	register long hi;
	unsigned long flags;

	spin_lock_irqsave(&lock, flags);
	if (!Inited) {
		RandomValue = jiffies;
		Inited = 1;
	}
	rv = RandomValue;
	hi = rv / 127773;
	lo = rv % 127773;
	rv = 16807 * lo - 2836 * hi;
	if (rv <= 0)
		rv += 2147483647;
	RandomValue = rv;
	spin_unlock_irqrestore(&lock, flags);
	return rv;
}
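/*
 * Fault injection for SRR testing: qlt_check_srr_debug()
 * pseudo-randomly truncates the tail or head of a READ buffer
 * (FCP_WRITE cannot be simulated this way), so that the initiator is
 * forced to recover the shortened transfer via Sequence
 * Retransmission Requests.
 */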
lost, so it won't lead to an SRR */
    if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
        == 50) {
        *xmit_type &= ~QLA_TGT_XMIT_STATUS;
        ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
            "Dropping cmd %p (tag %d) status", cmd, cmd->tag);
    }
#endif
    /*
     * It's currently not possible to simulate SRRs for FCP_WRITE without
     * a physical link layer failure, so don't even try here.
     */
    if (cmd->dma_data_direction != DMA_FROM_DEVICE)
        return;

    if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
        ((qlt_srr_random() % 100) == 20)) {
        int i, leave = 0;
        unsigned int tot_len = 0;

        while (leave == 0)
            leave = qlt_srr_random() % cmd->sg_cnt;

        for (i = 0; i < leave; i++)
            tot_len += cmd->sg[i].length;

        ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
            "Cutting cmd %p (tag %d) buffer"
            " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
            " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave,
            cmd->bufflen, cmd->sg_cnt);

        cmd->bufflen = tot_len;
        cmd->sg_cnt = leave;
    }

    if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
        unsigned int offset = qlt_srr_random() % cmd->bufflen;

        ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
            "Cutting cmd %p (tag %d) buffer head "
            "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset,
            cmd->bufflen);
        if (offset == 0)
            *xmit_type &= ~QLA_TGT_XMIT_DATA;
        else if (qlt_set_data_offset(cmd, offset)) {
            ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
                "qlt_set_data_offset() failed (tag %d)", cmd->tag);
        }
    }
}
#else
static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
{}
#endif

static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
    struct qla_tgt_prm *prm)
{
    prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
        (uint32_t)sizeof(ctio->u.status1.sense_data));
    ctio->u.status0.flags |=
        __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
    if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
        ctio->u.status0.flags |= __constant_cpu_to_le16(
            CTIO7_FLAGS_EXPLICIT_CONFORM |
            CTIO7_FLAGS_CONFORM_REQ);
    }
    ctio->u.status0.residual = cpu_to_le32(prm->residual);
    ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
    if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
        int i;

        if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
            if (prm->cmd->se_cmd.scsi_status != 0) {
                ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
                    "Skipping EXPLICIT_CONFORM and "
                    "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
                    "non GOOD status\n");
                goto skip_explicit_conf;
            }
            ctio->u.status1.flags |= __constant_cpu_to_le16(
                CTIO7_FLAGS_EXPLICIT_CONFORM |
                CTIO7_FLAGS_CONFORM_REQ);
        }
skip_explicit_conf:
        ctio->u.status1.flags &=
            ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
        ctio->u.status1.flags |=
            __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
        ctio->u.status1.scsi_status |=
            __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
        ctio->u.status1.sense_length =
            cpu_to_le16(prm->sense_buffer_len);
        for (i = 0; i < prm->sense_buffer_len/4; i++)
            ((uint32_t *)ctio->u.status1.sense_data)[i] =
                cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
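        /*
         * The loop above stores the sense bytes as big-endian 32-bit
         * words on purpose: the ISP byte-swaps IOCB payloads as
         * little-endian words, so pre-swapping with cpu_to_be32()
         * presumably leaves the sense buffer in wire order for the
         * initiator.
         */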
"qla_target(%d): %d bytes of sense " 1937 "lost", prm->tgt->ha->vp_idx, 1938 prm->sense_buffer_len % 4); 1939 q++; 1940 } 1941 } 1942 #endif 1943 } else { 1944 ctio->u.status1.flags &= 1945 ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); 1946 ctio->u.status1.flags |= 1947 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1); 1948 ctio->u.status1.sense_length = 0; 1949 memset(ctio->u.status1.sense_data, 0, 1950 sizeof(ctio->u.status1.sense_data)); 1951 } 1952 1953 /* Sense with len > 24, is it possible ??? */ 1954 } 1955 1956 1957 1958 /* diff */ 1959 static inline int 1960 qlt_hba_err_chk_enabled(struct se_cmd *se_cmd) 1961 { 1962 /* 1963 * Uncomment when corresponding SCSI changes are done. 1964 * 1965 if (!sp->cmd->prot_chk) 1966 return 0; 1967 * 1968 */ 1969 switch (se_cmd->prot_op) { 1970 case TARGET_PROT_DOUT_INSERT: 1971 case TARGET_PROT_DIN_STRIP: 1972 if (ql2xenablehba_err_chk >= 1) 1973 return 1; 1974 break; 1975 case TARGET_PROT_DOUT_PASS: 1976 case TARGET_PROT_DIN_PASS: 1977 if (ql2xenablehba_err_chk >= 2) 1978 return 1; 1979 break; 1980 case TARGET_PROT_DIN_INSERT: 1981 case TARGET_PROT_DOUT_STRIP: 1982 return 1; 1983 default: 1984 break; 1985 } 1986 return 0; 1987 } 1988 1989 /* 1990 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command 1991 * 1992 */ 1993 static inline void 1994 qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx) 1995 { 1996 uint32_t lba = 0xffffffff & se_cmd->t_task_lba; 1997 1998 /* wait til Mode Sense/Select cmd, modepage Ah, subpage 2 1999 * have been immplemented by TCM, before AppTag is avail. 2000 * Look for modesense_handlers[] 2001 */ 2002 ctx->app_tag = 0; 2003 ctx->app_tag_mask[0] = 0x0; 2004 ctx->app_tag_mask[1] = 0x0; 2005 2006 switch (se_cmd->prot_type) { 2007 case TARGET_DIF_TYPE0_PROT: 2008 /* 2009 * No check for ql2xenablehba_err_chk, as it would be an 2010 * I/O error if hba tag generation is not done. 2011 */ 2012 ctx->ref_tag = cpu_to_le32(lba); 2013 2014 if (!qlt_hba_err_chk_enabled(se_cmd)) 2015 break; 2016 2017 /* enable ALL bytes of the ref tag */ 2018 ctx->ref_tag_mask[0] = 0xff; 2019 ctx->ref_tag_mask[1] = 0xff; 2020 ctx->ref_tag_mask[2] = 0xff; 2021 ctx->ref_tag_mask[3] = 0xff; 2022 break; 2023 /* 2024 * For TYpe 1 protection: 16 bit GUARD tag, 32 bit REF tag, and 2025 * 16 bit app tag. 
    case TARGET_DIF_TYPE1_PROT:
        ctx->ref_tag = cpu_to_le32(lba);

        if (!qlt_hba_err_chk_enabled(se_cmd))
            break;

        /* enable ALL bytes of the ref tag */
        ctx->ref_tag_mask[0] = 0xff;
        ctx->ref_tag_mask[1] = 0xff;
        ctx->ref_tag_mask[2] = 0xff;
        ctx->ref_tag_mask[3] = 0xff;
        break;
    /*
     * For Type 2 protection: the 16 bit GUARD + 32 bit REF tag has to
     * match the LBA in the CDB + N
     */
    case TARGET_DIF_TYPE2_PROT:
        ctx->ref_tag = cpu_to_le32(lba);

        if (!qlt_hba_err_chk_enabled(se_cmd))
            break;

        /* enable ALL bytes of the ref tag */
        ctx->ref_tag_mask[0] = 0xff;
        ctx->ref_tag_mask[1] = 0xff;
        ctx->ref_tag_mask[2] = 0xff;
        ctx->ref_tag_mask[3] = 0xff;
        break;

    /* For Type 3 protection: 16 bit GUARD only */
    case TARGET_DIF_TYPE3_PROT:
        ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
            ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
        break;
    }
}

static inline int
qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
{
    uint32_t *cur_dsd;
    int sgc;
    uint32_t transfer_length = 0;
    uint32_t data_bytes;
    uint32_t dif_bytes;
    uint8_t bundling = 1;
    uint8_t *clr_ptr;
    struct crc_context *crc_ctx_pkt = NULL;
    struct qla_hw_data *ha;
    struct ctio_crc2_to_fw *pkt;
    dma_addr_t crc_ctx_dma;
    uint16_t fw_prot_opts = 0;
    struct qla_tgt_cmd *cmd = prm->cmd;
    struct se_cmd *se_cmd = &cmd->se_cmd;
    uint32_t h;
    struct atio_from_isp *atio = &prm->cmd->atio;
    uint16_t t16;

    sgc = 0;
    ha = vha->hw;

    pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr;
    prm->pkt = pkt;
    memset(pkt, 0, sizeof(*pkt));

    ql_dbg(ql_dbg_tgt, vha, 0xe071,
        "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
        vha->vp_idx, __func__, se_cmd, se_cmd->prot_op,
        prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);

    if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
        (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
        bundling = 0;

    /* Compute the DIF length and adjust the data length to include protection */
    data_bytes = cmd->bufflen;
    dif_bytes = (data_bytes / cmd->blk_sz) * 8;

    switch (se_cmd->prot_op) {
    case TARGET_PROT_DIN_INSERT:
    case TARGET_PROT_DOUT_STRIP:
        transfer_length = data_bytes;
        data_bytes += dif_bytes;
        break;

    case TARGET_PROT_DIN_STRIP:
    case TARGET_PROT_DOUT_INSERT:
    case TARGET_PROT_DIN_PASS:
    case TARGET_PROT_DOUT_PASS:
        transfer_length = data_bytes + dif_bytes;
        break;

    default:
        BUG();
        break;
    }

    if (!qlt_hba_err_chk_enabled(se_cmd))
        fw_prot_opts |= 0x10; /* Disable Guard tag checking */
    /* HBA error checking enabled */
    else if (IS_PI_UNINIT_CAPABLE(ha)) {
        if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
            (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
            fw_prot_opts |= PO_DIS_VALD_APP_ESC;
        else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
            fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
    }

    switch (se_cmd->prot_op) {
    case TARGET_PROT_DIN_INSERT:
    case TARGET_PROT_DOUT_INSERT:
        fw_prot_opts |= PO_MODE_DIF_INSERT;
        break;
    case TARGET_PROT_DIN_STRIP:
    case TARGET_PROT_DOUT_STRIP:
        fw_prot_opts |= PO_MODE_DIF_REMOVE;
        break;
    case TARGET_PROT_DIN_PASS:
    case TARGET_PROT_DOUT_PASS:
        fw_prot_opts
|= PO_MODE_DIF_PASS; 2148 /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */ 2149 break; 2150 default:/* Normal Request */ 2151 fw_prot_opts |= PO_MODE_DIF_PASS; 2152 break; 2153 } 2154 2155 2156 /* ---- PKT ---- */ 2157 /* Update entry type to indicate Command Type CRC_2 IOCB */ 2158 pkt->entry_type = CTIO_CRC2; 2159 pkt->entry_count = 1; 2160 pkt->vp_index = vha->vp_idx; 2161 2162 h = qlt_make_handle(vha); 2163 if (unlikely(h == QLA_TGT_NULL_HANDLE)) { 2164 /* 2165 * CTIO type 7 from the firmware doesn't provide a way to 2166 * know the initiator's LOOP ID, hence we can't find 2167 * the session and, so, the command. 2168 */ 2169 return -EAGAIN; 2170 } else 2171 ha->tgt.cmds[h-1] = prm->cmd; 2172 2173 2174 pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK; 2175 pkt->nport_handle = prm->cmd->loop_id; 2176 pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT); 2177 pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 2178 pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 2179 pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 2180 pkt->exchange_addr = atio->u.isp24.exchange_addr; 2181 2182 /* silence compile warning */ 2183 t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 2184 pkt->ox_id = cpu_to_le16(t16); 2185 2186 t16 = (atio->u.isp24.attr << 9); 2187 pkt->flags |= cpu_to_le16(t16); 2188 pkt->relative_offset = cpu_to_le32(prm->cmd->offset); 2189 2190 /* Set transfer direction */ 2191 if (cmd->dma_data_direction == DMA_TO_DEVICE) 2192 pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN); 2193 else if (cmd->dma_data_direction == DMA_FROM_DEVICE) 2194 pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT); 2195 2196 2197 pkt->dseg_count = prm->tot_dsds; 2198 /* Fibre channel byte count */ 2199 pkt->transfer_length = cpu_to_le32(transfer_length); 2200 2201 2202 /* ----- CRC context -------- */ 2203 2204 /* Allocate CRC context from global pool */ 2205 crc_ctx_pkt = cmd->ctx = 2206 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma); 2207 2208 if (!crc_ctx_pkt) 2209 goto crc_queuing_error; 2210 2211 /* Zero out CTX area. 
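 * dma_pool_alloc() does not zero the buffer it returns, so the whole
 * context is cleared below before its address is handed to the firmware;
 * a stale field from a previous pool user must not be interpreted as
 * valid.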
 */
    clr_ptr = (uint8_t *)crc_ctx_pkt;
    memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

    crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
    INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

    /* Set handle */
    crc_ctx_pkt->handle = pkt->handle;

    qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt);

    pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
    pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
    pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

    if (!bundling) {
        cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
    } else {
        /*
         * Configure bundling if we need to fetch interleaving
         * protection PCI accesses
         */
        fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
        crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
        crc_ctx_pkt->u.bundling.dseg_count =
            cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
        cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
    }

    /* Finish the common fields of the CRC pkt */
    crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz);
    crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
    crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
    crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);

    /* Walk the data segments */
    pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DSD_PTR);

    if (!bundling && prm->prot_seg_cnt) {
        if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
            prm->tot_dsds, cmd))
            goto crc_queuing_error;
    } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
        (prm->tot_dsds - prm->prot_seg_cnt), cmd))
        goto crc_queuing_error;

    if (bundling && prm->prot_seg_cnt) {
        /* Walk the dif segments */
        pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;

        cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
        if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
            prm->prot_seg_cnt, cmd))
            goto crc_queuing_error;
    }
    return QLA_SUCCESS;

crc_queuing_error:
    /* Cleanup will be performed by the caller */

    return QLA_FUNCTION_FAILED;
}

/*
 * Callback to set up a response of xmit_type QLA_TGT_XMIT_DATA and/or
 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
 */
int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
    uint8_t scsi_status)
{
    struct scsi_qla_host *vha = cmd->vha;
    struct qla_hw_data *ha = vha->hw;
    struct ctio7_to_24xx *pkt;
    struct qla_tgt_prm prm;
    uint32_t full_req_cnt = 0;
    unsigned long flags = 0;
    int res;

    memset(&prm, 0, sizeof(prm));
    qlt_check_srr_debug(cmd, &xmit_type);

    ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
        "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n",
        (xmit_type & QLA_TGT_XMIT_STATUS) ?
        1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
        &cmd->se_cmd);

    res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
        &full_req_cnt);
    if (unlikely(res != 0)) {
        if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
            return 0;

        return res;
    }

    spin_lock_irqsave(&ha->hardware_lock, flags);

    /* Does the F/W have enough IOCBs free for this request? */
    res = qlt_check_reserve_free_req(vha, full_req_cnt);
    if (unlikely(res))
        goto out_unmap_unlock;

    if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
        res = qlt_build_ctio_crc2_pkt(&prm, vha);
    else
        res = qlt_24xx_build_ctio_pkt(&prm, vha);
    if (unlikely(res != 0))
        goto out_unmap_unlock;

    pkt = (struct ctio7_to_24xx *)prm.pkt;

    if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
        pkt->u.status0.flags |=
            __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
                CTIO7_FLAGS_STATUS_MODE_0);

        if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
            qlt_load_data_segments(&prm, vha);

        if (prm.add_status_pkt == 0) {
            if (xmit_type & QLA_TGT_XMIT_STATUS) {
                pkt->u.status0.scsi_status =
                    cpu_to_le16(prm.rq_result);
                pkt->u.status0.residual =
                    cpu_to_le32(prm.residual);
                pkt->u.status0.flags |= __constant_cpu_to_le16(
                    CTIO7_FLAGS_SEND_STATUS);
                if (qlt_need_explicit_conf(ha, cmd, 0)) {
                    pkt->u.status0.flags |=
                        __constant_cpu_to_le16(
                            CTIO7_FLAGS_EXPLICIT_CONFORM |
                            CTIO7_FLAGS_CONFORM_REQ);
                }
            }

        } else {
            /*
             * We have already made sure that there is a sufficient
             * amount of request entries to not drop the HW lock in
             * req_pkt().
             */
            struct ctio7_to_24xx *ctio =
                (struct ctio7_to_24xx *)qlt_get_req_pkt(vha);

            ql_dbg(ql_dbg_tgt, vha, 0xe019,
                "Building additional status packet\n");

            /*
             * T10Dif: ctio_crc2_to_fw overlay on top of
             * ctio7_to_24xx
             */
            memcpy(ctio, pkt, sizeof(*ctio));
            /* reset back to CTIO7 */
            ctio->entry_count = 1;
            ctio->entry_type = CTIO_TYPE7;
            ctio->dseg_count = 0;
            ctio->u.status1.flags &= ~__constant_cpu_to_le16(
                CTIO7_FLAGS_DATA_IN);

            /* The real finish is ctio_m1's finish */
            pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
            pkt->u.status0.flags |= __constant_cpu_to_le16(
                CTIO7_FLAGS_DONT_RET_CTIO);

            /*
             * qlt_24xx_init_ctio_to_isp will correct all the
             * necessary fields that are part of CTIO7.
             * There should be no residual of CTIO-CRC2 data.
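             *
             * With this two-IOCB arrangement the firmware is told
             * (via DONT_RET_CTIO) not to return the data CTIO on
             * its own; only the trailing status CTIO completes,
             * and if the intermediate handle does come back
             * (error/reset), qlt_do_ctio_completion() just logs it
             * and returns.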
2384 */ 2385 qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio, 2386 &prm); 2387 pr_debug("Status CTIO7: %p\n", ctio); 2388 } 2389 } else 2390 qlt_24xx_init_ctio_to_isp(pkt, &prm); 2391 2392 2393 cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */ 2394 2395 ql_dbg(ql_dbg_tgt, vha, 0xe01a, 2396 "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n", 2397 pkt, scsi_status); 2398 2399 qla2x00_start_iocbs(vha, vha->req); 2400 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2401 2402 return 0; 2403 2404 out_unmap_unlock: 2405 if (cmd->sg_mapped) 2406 qlt_unmap_sg(vha, cmd); 2407 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2408 2409 return res; 2410 } 2411 EXPORT_SYMBOL(qlt_xmit_response); 2412 2413 int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) 2414 { 2415 struct ctio7_to_24xx *pkt; 2416 struct scsi_qla_host *vha = cmd->vha; 2417 struct qla_hw_data *ha = vha->hw; 2418 struct qla_tgt *tgt = cmd->tgt; 2419 struct qla_tgt_prm prm; 2420 unsigned long flags; 2421 int res = 0; 2422 2423 memset(&prm, 0, sizeof(prm)); 2424 prm.cmd = cmd; 2425 prm.tgt = tgt; 2426 prm.sg = NULL; 2427 prm.req_cnt = 1; 2428 2429 /* Send marker if required */ 2430 if (qlt_issue_marker(vha, 0) != QLA_SUCCESS) 2431 return -EIO; 2432 2433 ql_dbg(ql_dbg_tgt, vha, 0xe01b, 2434 "%s: CTIO_start: vha(%d) se_cmd %p ox_id %04x\n", 2435 __func__, (int)vha->vp_idx, &cmd->se_cmd, 2436 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 2437 2438 /* Calculate number of entries and segments required */ 2439 if (qlt_pci_map_calc_cnt(&prm) != 0) 2440 return -EAGAIN; 2441 2442 spin_lock_irqsave(&ha->hardware_lock, flags); 2443 2444 /* Does F/W have an IOCBs for this request */ 2445 res = qlt_check_reserve_free_req(vha, prm.req_cnt); 2446 if (res != 0) 2447 goto out_unlock_free_unmap; 2448 if (cmd->se_cmd.prot_op) 2449 res = qlt_build_ctio_crc2_pkt(&prm, vha); 2450 else 2451 res = qlt_24xx_build_ctio_pkt(&prm, vha); 2452 2453 if (unlikely(res != 0)) 2454 goto out_unlock_free_unmap; 2455 pkt = (struct ctio7_to_24xx *)prm.pkt; 2456 pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT | 2457 CTIO7_FLAGS_STATUS_MODE_0); 2458 2459 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) 2460 qlt_load_data_segments(&prm, vha); 2461 2462 cmd->state = QLA_TGT_STATE_NEED_DATA; 2463 2464 qla2x00_start_iocbs(vha, vha->req); 2465 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2466 2467 return res; 2468 2469 out_unlock_free_unmap: 2470 if (cmd->sg_mapped) 2471 qlt_unmap_sg(vha, cmd); 2472 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2473 2474 return res; 2475 } 2476 EXPORT_SYMBOL(qlt_rdy_to_xfer); 2477 2478 2479 /* 2480 * Checks the guard or meta-data for the type of error 2481 * detected by the HBA. 
2482 */ 2483 static inline int 2484 qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, 2485 struct ctio_crc_from_fw *sts) 2486 { 2487 uint8_t *ap = &sts->actual_dif[0]; 2488 uint8_t *ep = &sts->expected_dif[0]; 2489 uint32_t e_ref_tag, a_ref_tag; 2490 uint16_t e_app_tag, a_app_tag; 2491 uint16_t e_guard, a_guard; 2492 uint64_t lba = cmd->se_cmd.t_task_lba; 2493 2494 a_guard = be16_to_cpu(*(uint16_t *)(ap + 0)); 2495 a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2)); 2496 a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4)); 2497 2498 e_guard = be16_to_cpu(*(uint16_t *)(ep + 0)); 2499 e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2)); 2500 e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4)); 2501 2502 ql_dbg(ql_dbg_tgt, vha, 0xe075, 2503 "iocb(s) %p Returned STATUS.\n", sts); 2504 2505 ql_dbg(ql_dbg_tgt, vha, 0xf075, 2506 "dif check TGT cdb 0x%x lba 0x%llu: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n", 2507 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, 2508 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard); 2509 2510 /* 2511 * Ignore sector if: 2512 * For type 3: ref & app tag is all 'f's 2513 * For type 0,1,2: app tag is all 'f's 2514 */ 2515 if ((a_app_tag == 0xffff) && 2516 ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) || 2517 (a_ref_tag == 0xffffffff))) { 2518 uint32_t blocks_done; 2519 2520 /* 2TB boundary case covered automatically with this */ 2521 blocks_done = e_ref_tag - (uint32_t)lba + 1; 2522 cmd->se_cmd.bad_sector = e_ref_tag; 2523 cmd->se_cmd.pi_err = 0; 2524 ql_dbg(ql_dbg_tgt, vha, 0xf074, 2525 "need to return scsi good\n"); 2526 2527 /* Update protection tag */ 2528 if (cmd->prot_sg_cnt) { 2529 uint32_t i, j = 0, k = 0, num_ent; 2530 struct scatterlist *sg, *sgl; 2531 2532 2533 sgl = cmd->prot_sg; 2534 2535 /* Patch the corresponding protection tags */ 2536 for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) { 2537 num_ent = sg_dma_len(sg) / 8; 2538 if (k + num_ent < blocks_done) { 2539 k += num_ent; 2540 continue; 2541 } 2542 j = blocks_done - k - 1; 2543 k = blocks_done; 2544 break; 2545 } 2546 2547 if (k != blocks_done) { 2548 ql_log(ql_log_warn, vha, 0xf076, 2549 "unexpected tag values tag:lba=%u:%llu)\n", 2550 e_ref_tag, (unsigned long long)lba); 2551 goto out; 2552 } 2553 2554 #if 0 2555 struct sd_dif_tuple *spt; 2556 /* TODO: 2557 * This section came from initiator. Is it valid here? 2558 * should ulp be override with actual val??? 
2559 */ 2560 spt = page_address(sg_page(sg)) + sg->offset; 2561 spt += j; 2562 2563 spt->app_tag = 0xffff; 2564 if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3) 2565 spt->ref_tag = 0xffffffff; 2566 #endif 2567 } 2568 2569 return 0; 2570 } 2571 2572 /* check guard */ 2573 if (e_guard != a_guard) { 2574 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; 2575 cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba; 2576 2577 ql_log(ql_log_warn, vha, 0xe076, 2578 "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", 2579 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, 2580 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, 2581 a_guard, e_guard, cmd); 2582 goto out; 2583 } 2584 2585 /* check ref tag */ 2586 if (e_ref_tag != a_ref_tag) { 2587 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; 2588 cmd->se_cmd.bad_sector = e_ref_tag; 2589 2590 ql_log(ql_log_warn, vha, 0xe077, 2591 "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", 2592 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, 2593 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, 2594 a_guard, e_guard, cmd); 2595 goto out; 2596 } 2597 2598 /* check appl tag */ 2599 if (e_app_tag != a_app_tag) { 2600 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; 2601 cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba; 2602 2603 ql_log(ql_log_warn, vha, 0xe078, 2604 "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", 2605 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, 2606 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, 2607 a_guard, e_guard, cmd); 2608 goto out; 2609 } 2610 out: 2611 return 1; 2612 } 2613 2614 2615 /* If hardware_lock held on entry, might drop it, then reaquire */ 2616 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ 2617 static int __qlt_send_term_exchange(struct scsi_qla_host *vha, 2618 struct qla_tgt_cmd *cmd, 2619 struct atio_from_isp *atio) 2620 { 2621 struct ctio7_to_24xx *ctio24; 2622 struct qla_hw_data *ha = vha->hw; 2623 request_t *pkt; 2624 int ret = 0; 2625 uint16_t temp; 2626 2627 ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha); 2628 2629 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); 2630 if (pkt == NULL) { 2631 ql_dbg(ql_dbg_tgt, vha, 0xe050, 2632 "qla_target(%d): %s failed: unable to allocate " 2633 "request packet\n", vha->vp_idx, __func__); 2634 return -ENOMEM; 2635 } 2636 2637 if (cmd != NULL) { 2638 if (cmd->state < QLA_TGT_STATE_PROCESSED) { 2639 ql_dbg(ql_dbg_tgt, vha, 0xe051, 2640 "qla_target(%d): Terminating cmd %p with " 2641 "incorrect state %d\n", vha->vp_idx, cmd, 2642 cmd->state); 2643 } else 2644 ret = 1; 2645 } 2646 2647 pkt->entry_count = 1; 2648 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 2649 2650 ctio24 = (struct ctio7_to_24xx *)pkt; 2651 ctio24->entry_type = CTIO_TYPE7; 2652 ctio24->nport_handle = cmd ? 
        cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
    ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
    ctio24->vp_index = vha->vp_idx;
    ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
    ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
    ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
    ctio24->exchange_addr = atio->u.isp24.exchange_addr;
    ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
        __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
            CTIO7_FLAGS_TERMINATE);
    temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
    ctio24->u.status1.ox_id = cpu_to_le16(temp);

    /* Most likely, it isn't needed */
    ctio24->u.status1.residual = get_unaligned((uint32_t *)
        &atio->u.isp24.fcp_cmnd.add_cdb[
        atio->u.isp24.fcp_cmnd.add_cdb_len]);
    if (ctio24->u.status1.residual != 0)
        ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;

    qla2x00_start_iocbs(vha, vha->req);
    return ret;
}

static void qlt_send_term_exchange(struct scsi_qla_host *vha,
    struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
{
    unsigned long flags;
    int rc;

    if (qlt_issue_marker(vha, ha_locked) < 0)
        return;

    if (ha_locked) {
        rc = __qlt_send_term_exchange(vha, cmd, atio);
        goto done;
    }
    spin_lock_irqsave(&vha->hw->hardware_lock, flags);
    rc = __qlt_send_term_exchange(vha, cmd, atio);
    spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
done:
    /*
     * Terminate exchange will tell the fw to release any active CTIO
     * that's in FW possession and clean up the exchange.
     *
     * "cmd->state == QLA_TGT_STATE_ABORTED" means the CTIO is still
     * down at the FW. Free the cmd once that CTIO comes back with
     * aborted (0x2) status.
     *
     * "cmd->state != QLA_TGT_STATE_ABORTED" means the CTIO is already
     * back with some error. Free the cmd now.
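     *
     * (In short: free here only when the exchange is already dead at
     * the firmware level; otherwise the free is deferred to the CTIO
     * completion path.)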
2703 */ 2704 if ((rc == 1) && (cmd->state != QLA_TGT_STATE_ABORTED)) { 2705 if (!ha_locked && !in_interrupt()) 2706 msleep(250); /* just in case */ 2707 2708 if (cmd->sg_mapped) 2709 qlt_unmap_sg(vha, cmd); 2710 vha->hw->tgt.tgt_ops->free_cmd(cmd); 2711 } 2712 return; 2713 } 2714 2715 void qlt_free_cmd(struct qla_tgt_cmd *cmd) 2716 { 2717 struct qla_tgt_sess *sess = cmd->sess; 2718 2719 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074, 2720 "%s: se_cmd[%p] ox_id %04x\n", 2721 __func__, &cmd->se_cmd, 2722 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 2723 2724 BUG_ON(cmd->sg_mapped); 2725 if (unlikely(cmd->free_sg)) 2726 kfree(cmd->sg); 2727 2728 if (!sess || !sess->se_sess) { 2729 WARN_ON(1); 2730 return; 2731 } 2732 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); 2733 } 2734 EXPORT_SYMBOL(qlt_free_cmd); 2735 2736 /* ha->hardware_lock supposed to be held on entry */ 2737 static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha, 2738 struct qla_tgt_cmd *cmd, void *ctio) 2739 { 2740 struct qla_tgt_srr_ctio *sc; 2741 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 2742 struct qla_tgt_srr_imm *imm; 2743 2744 tgt->ctio_srr_id++; 2745 2746 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019, 2747 "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx); 2748 2749 if (!ctio) { 2750 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055, 2751 "qla_target(%d): SRR CTIO, but ctio is NULL\n", 2752 vha->vp_idx); 2753 return -EINVAL; 2754 } 2755 2756 sc = kzalloc(sizeof(*sc), GFP_ATOMIC); 2757 if (sc != NULL) { 2758 sc->cmd = cmd; 2759 /* IRQ is already OFF */ 2760 spin_lock(&tgt->srr_lock); 2761 sc->srr_id = tgt->ctio_srr_id; 2762 list_add_tail(&sc->srr_list_entry, 2763 &tgt->srr_ctio_list); 2764 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a, 2765 "CTIO SRR %p added (id %d)\n", sc, sc->srr_id); 2766 if (tgt->imm_srr_id == tgt->ctio_srr_id) { 2767 int found = 0; 2768 list_for_each_entry(imm, &tgt->srr_imm_list, 2769 srr_list_entry) { 2770 if (imm->srr_id == sc->srr_id) { 2771 found = 1; 2772 break; 2773 } 2774 } 2775 if (found) { 2776 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b, 2777 "Scheduling srr work\n"); 2778 schedule_work(&tgt->srr_work); 2779 } else { 2780 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056, 2781 "qla_target(%d): imm_srr_id " 2782 "== ctio_srr_id (%d), but there is no " 2783 "corresponding SRR IMM, deleting CTIO " 2784 "SRR %p\n", vha->vp_idx, 2785 tgt->ctio_srr_id, sc); 2786 list_del(&sc->srr_list_entry); 2787 spin_unlock(&tgt->srr_lock); 2788 2789 kfree(sc); 2790 return -EINVAL; 2791 } 2792 } 2793 spin_unlock(&tgt->srr_lock); 2794 } else { 2795 struct qla_tgt_srr_imm *ti; 2796 2797 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057, 2798 "qla_target(%d): Unable to allocate SRR CTIO entry\n", 2799 vha->vp_idx); 2800 spin_lock(&tgt->srr_lock); 2801 list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list, 2802 srr_list_entry) { 2803 if (imm->srr_id == tgt->ctio_srr_id) { 2804 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c, 2805 "IMM SRR %p deleted (id %d)\n", 2806 imm, imm->srr_id); 2807 list_del(&imm->srr_list_entry); 2808 qlt_reject_free_srr_imm(vha, imm, 1); 2809 } 2810 } 2811 spin_unlock(&tgt->srr_lock); 2812 2813 return -ENOMEM; 2814 } 2815 2816 return 0; 2817 } 2818 2819 /* 2820 * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reaquire 2821 */ 2822 static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio, 2823 struct qla_tgt_cmd *cmd, uint32_t status) 2824 { 2825 int term = 0; 2826 2827 if (ctio != NULL) { 2828 struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio; 2829 term = !(c->flags & 2830 __constant_cpu_to_le16(OF_TERM_EXCH)); 2831 } else 2832 term = 1; 2833 2834 if (term) 2835 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); 2836 2837 return term; 2838 } 2839 2840 /* ha->hardware_lock supposed to be held on entry */ 2841 static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha, 2842 uint32_t handle) 2843 { 2844 struct qla_hw_data *ha = vha->hw; 2845 2846 handle--; 2847 if (ha->tgt.cmds[handle] != NULL) { 2848 struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle]; 2849 ha->tgt.cmds[handle] = NULL; 2850 return cmd; 2851 } else 2852 return NULL; 2853 } 2854 2855 /* ha->hardware_lock supposed to be held on entry */ 2856 static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha, 2857 uint32_t handle, void *ctio) 2858 { 2859 struct qla_tgt_cmd *cmd = NULL; 2860 2861 /* Clear out internal marks */ 2862 handle &= ~(CTIO_COMPLETION_HANDLE_MARK | 2863 CTIO_INTERMEDIATE_HANDLE_MARK); 2864 2865 if (handle != QLA_TGT_NULL_HANDLE) { 2866 if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) { 2867 ql_dbg(ql_dbg_tgt, vha, 0xe01d, "%s", 2868 "SKIP_HANDLE CTIO\n"); 2869 return NULL; 2870 } 2871 /* handle-1 is actually used */ 2872 if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) { 2873 ql_dbg(ql_dbg_tgt, vha, 0xe052, 2874 "qla_target(%d): Wrong handle %x received\n", 2875 vha->vp_idx, handle); 2876 return NULL; 2877 } 2878 cmd = qlt_get_cmd(vha, handle); 2879 if (unlikely(cmd == NULL)) { 2880 ql_dbg(ql_dbg_tgt, vha, 0xe053, 2881 "qla_target(%d): Suspicious: unable to " 2882 "find the command with handle %x\n", vha->vp_idx, 2883 handle); 2884 return NULL; 2885 } 2886 } else if (ctio != NULL) { 2887 /* We can't get loop ID from CTIO7 */ 2888 ql_dbg(ql_dbg_tgt, vha, 0xe054, 2889 "qla_target(%d): Wrong CTIO received: QLA24xx doesn't " 2890 "support NULL handles\n", vha->vp_idx); 2891 return NULL; 2892 } 2893 2894 return cmd; 2895 } 2896 2897 /* 2898 * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reaquire 2899 */ 2900 static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, 2901 uint32_t status, void *ctio) 2902 { 2903 struct qla_hw_data *ha = vha->hw; 2904 struct se_cmd *se_cmd; 2905 struct target_core_fabric_ops *tfo; 2906 struct qla_tgt_cmd *cmd; 2907 2908 ql_dbg(ql_dbg_tgt, vha, 0xe01e, 2909 "qla_target(%d): handle(ctio %p status %#x) <- %08x\n", 2910 vha->vp_idx, ctio, status, handle); 2911 2912 if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) { 2913 /* That could happen only in case of an error/reset/abort */ 2914 if (status != CTIO_SUCCESS) { 2915 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d, 2916 "Intermediate CTIO received" 2917 " (status %x)\n", status); 2918 } 2919 return; 2920 } 2921 2922 cmd = qlt_ctio_to_cmd(vha, handle, ctio); 2923 if (cmd == NULL) 2924 return; 2925 2926 se_cmd = &cmd->se_cmd; 2927 tfo = se_cmd->se_tfo; 2928 2929 if (cmd->sg_mapped) 2930 qlt_unmap_sg(vha, cmd); 2931 2932 if (unlikely(status != CTIO_SUCCESS)) { 2933 switch (status & 0xFFFF) { 2934 case CTIO_LIP_RESET: 2935 case CTIO_TARGET_RESET: 2936 case CTIO_ABORTED: 2937 /* driver request abort via Terminate exchange */ 2938 case CTIO_TIMEOUT: 2939 case CTIO_INVALID_RX_ID: 2940 /* They are OK */ 2941 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058, 2942 "qla_target(%d): CTIO with " 2943 "status %#x received, state %x, se_cmd %p, " 2944 "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, " 2945 "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx, 2946 status, cmd->state, se_cmd); 2947 break; 2948 2949 case CTIO_PORT_LOGGED_OUT: 2950 case CTIO_PORT_UNAVAILABLE: 2951 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059, 2952 "qla_target(%d): CTIO with PORT LOGGED " 2953 "OUT (29) or PORT UNAVAILABLE (28) status %x " 2954 "received (state %x, se_cmd %p)\n", vha->vp_idx, 2955 status, cmd->state, se_cmd); 2956 break; 2957 2958 case CTIO_SRR_RECEIVED: 2959 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a, 2960 "qla_target(%d): CTIO with SRR_RECEIVED" 2961 " status %x received (state %x, se_cmd %p)\n", 2962 vha->vp_idx, status, cmd->state, se_cmd); 2963 if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0) 2964 break; 2965 else 2966 return; 2967 2968 case CTIO_DIF_ERROR: { 2969 struct ctio_crc_from_fw *crc = 2970 (struct ctio_crc_from_fw *)ctio; 2971 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073, 2972 "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n", 2973 vha->vp_idx, status, cmd->state, se_cmd, 2974 *((u64 *)&crc->actual_dif[0]), 2975 *((u64 *)&crc->expected_dif[0])); 2976 2977 if (qlt_handle_dif_error(vha, cmd, ctio)) { 2978 if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 2979 /* scsi Write/xfer rdy complete */ 2980 goto skip_term; 2981 } else { 2982 /* scsi read/xmit respond complete 2983 * call handle dif to send scsi status 2984 * rather than terminate exchange. 2985 */ 2986 cmd->state = QLA_TGT_STATE_PROCESSED; 2987 ha->tgt.tgt_ops->handle_dif_err(cmd); 2988 return; 2989 } 2990 } else { 2991 /* Need to generate a SCSI good completion. 2992 * because FW did not send scsi status. 2993 */ 2994 status = 0; 2995 goto skip_term; 2996 } 2997 break; 2998 } 2999 default: 3000 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, 3001 "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n", 3002 vha->vp_idx, status, cmd->state, se_cmd); 3003 break; 3004 } 3005 3006 3007 /* "cmd->state == QLA_TGT_STATE_ABORTED" means 3008 * cmd is already aborted/terminated, we don't 3009 * need to terminate again. The exchange is already 3010 * cleaned up/freed at FW level. 
Just cleanup at driver 3011 * level. 3012 */ 3013 if ((cmd->state != QLA_TGT_STATE_NEED_DATA) && 3014 (cmd->state != QLA_TGT_STATE_ABORTED)) { 3015 if (qlt_term_ctio_exchange(vha, ctio, cmd, status)) 3016 return; 3017 } 3018 } 3019 skip_term: 3020 3021 if (cmd->state == QLA_TGT_STATE_PROCESSED) { 3022 ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd); 3023 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 3024 int rx_status = 0; 3025 3026 cmd->state = QLA_TGT_STATE_DATA_IN; 3027 3028 if (unlikely(status != CTIO_SUCCESS)) 3029 rx_status = -EIO; 3030 else 3031 cmd->write_data_transferred = 1; 3032 3033 ql_dbg(ql_dbg_tgt, vha, 0xe020, 3034 "Data received, context %x, rx_status %d\n", 3035 0x0, rx_status); 3036 3037 ha->tgt.tgt_ops->handle_data(cmd); 3038 return; 3039 } else if (cmd->state == QLA_TGT_STATE_ABORTED) { 3040 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, 3041 "Aborted command %p (tag %d) finished\n", cmd, cmd->tag); 3042 } else { 3043 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c, 3044 "qla_target(%d): A command in state (%d) should " 3045 "not return a CTIO complete\n", vha->vp_idx, cmd->state); 3046 } 3047 3048 if (unlikely(status != CTIO_SUCCESS) && 3049 (cmd->state != QLA_TGT_STATE_ABORTED)) { 3050 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n"); 3051 dump_stack(); 3052 } 3053 3054 ha->tgt.tgt_ops->free_cmd(cmd); 3055 } 3056 3057 static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha, 3058 uint8_t task_codes) 3059 { 3060 int fcp_task_attr; 3061 3062 switch (task_codes) { 3063 case ATIO_SIMPLE_QUEUE: 3064 fcp_task_attr = MSG_SIMPLE_TAG; 3065 break; 3066 case ATIO_HEAD_OF_QUEUE: 3067 fcp_task_attr = MSG_HEAD_TAG; 3068 break; 3069 case ATIO_ORDERED_QUEUE: 3070 fcp_task_attr = MSG_ORDERED_TAG; 3071 break; 3072 case ATIO_ACA_QUEUE: 3073 fcp_task_attr = MSG_ACA_TAG; 3074 break; 3075 case ATIO_UNTAGGED: 3076 fcp_task_attr = MSG_SIMPLE_TAG; 3077 break; 3078 default: 3079 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d, 3080 "qla_target: unknown task code %x, use ORDERED instead\n", 3081 task_codes); 3082 fcp_task_attr = MSG_ORDERED_TAG; 3083 break; 3084 } 3085 3086 return fcp_task_attr; 3087 } 3088 3089 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *, 3090 uint8_t *); 3091 /* 3092 * Process context for I/O path into tcm_qla2xxx code 3093 */ 3094 static void __qlt_do_work(struct qla_tgt_cmd *cmd) 3095 { 3096 scsi_qla_host_t *vha = cmd->vha; 3097 struct qla_hw_data *ha = vha->hw; 3098 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 3099 struct qla_tgt_sess *sess = cmd->sess; 3100 struct atio_from_isp *atio = &cmd->atio; 3101 unsigned char *cdb; 3102 unsigned long flags; 3103 uint32_t data_length; 3104 int ret, fcp_task_attr, data_dir, bidi = 0; 3105 3106 if (tgt->tgt_stop) 3107 goto out_term; 3108 3109 cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; 3110 cmd->tag = atio->u.isp24.exchange_addr; 3111 cmd->unpacked_lun = scsilun_to_int( 3112 (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun); 3113 3114 if (atio->u.isp24.fcp_cmnd.rddata && 3115 atio->u.isp24.fcp_cmnd.wrdata) { 3116 bidi = 1; 3117 data_dir = DMA_TO_DEVICE; 3118 } else if (atio->u.isp24.fcp_cmnd.rddata) 3119 data_dir = DMA_FROM_DEVICE; 3120 else if (atio->u.isp24.fcp_cmnd.wrdata) 3121 data_dir = DMA_TO_DEVICE; 3122 else 3123 data_dir = DMA_NONE; 3124 3125 fcp_task_attr = qlt_get_fcp_task_attr(vha, 3126 atio->u.isp24.fcp_cmnd.task_attr); 3127 data_length = be32_to_cpu(get_unaligned((uint32_t *) 3128 &atio->u.isp24.fcp_cmnd.add_cdb[ 3129 atio->u.isp24.fcp_cmnd.add_cdb_len])); 3130 3131 ql_dbg(ql_dbg_tgt, vha, 
        0xe022,
        "qla_target: START qla cmd: %p se_cmd %p lun: 0x%04x (tag %d) len(%d) ox_id %x\n",
        cmd, &cmd->se_cmd, cmd->unpacked_lun, cmd->tag, data_length,
        cmd->atio.u.isp24.fcp_hdr.ox_id);

    ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
        fcp_task_attr, data_dir, bidi);
    if (ret != 0)
        goto out_term;
    /*
     * Drop the extra session reference taken in qlt_handle_cmd_for_atio().
     */
    spin_lock_irqsave(&ha->hardware_lock, flags);
    ha->tgt.tgt_ops->put_sess(sess);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);
    return;

out_term:
    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd);
    /*
     * cmd has not been sent to the target yet, so pass NULL as the second
     * argument to qlt_send_term_exchange() and free the memory here.
     */
    spin_lock_irqsave(&ha->hardware_lock, flags);
    qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
    percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
    ha->tgt.tgt_ops->put_sess(sess);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void qlt_do_work(struct work_struct *work)
{
    struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);

    __qlt_do_work(cmd);
}

static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
    struct qla_tgt_sess *sess,
    struct atio_from_isp *atio)
{
    struct se_session *se_sess = sess->se_sess;
    struct qla_tgt_cmd *cmd;
    int tag;

    tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
    if (tag < 0)
        return NULL;

    cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
    memset(cmd, 0, sizeof(struct qla_tgt_cmd));

    memcpy(&cmd->atio, atio, sizeof(*atio));
    cmd->state = QLA_TGT_STATE_NEW;
    cmd->tgt = vha->vha_tgt.qla_tgt;
    cmd->vha = vha;
    cmd->se_cmd.map_tag = tag;
    cmd->sess = sess;
    cmd->loop_id = sess->loop_id;
    cmd->conf_compl_supported = sess->conf_compl_supported;

    return cmd;
}

static void qlt_send_busy(struct scsi_qla_host *, struct atio_from_isp *,
    uint16_t);

static void qlt_create_sess_from_atio(struct work_struct *work)
{
    struct qla_tgt_sess_op *op = container_of(work,
        struct qla_tgt_sess_op, work);
    scsi_qla_host_t *vha = op->vha;
    struct qla_hw_data *ha = vha->hw;
    struct qla_tgt_sess *sess;
    struct qla_tgt_cmd *cmd;
    unsigned long flags;
    uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;

    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
        "qla_target(%d): Unable to find wwn login"
        " (s_id %x:%x:%x), trying to create it manually\n",
        vha->vp_idx, s_id[0], s_id[1], s_id[2]);

    if (op->atio.u.raw.entry_count > 1) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
            "Dropping multi-entry atio %p\n", &op->atio);
        goto out_term;
    }

    mutex_lock(&vha->vha_tgt.tgt_mutex);
    sess = qlt_make_local_sess(vha, s_id);
    /* sess has an extra creation ref. */
    mutex_unlock(&vha->vha_tgt.tgt_mutex);

    if (!sess)
        goto out_term;
    /*
     * Now obtain a pre-allocated session tag using the original op->atio
     * packet header, and dispatch into __qlt_do_work() using the existing
     * process context.
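     *
     * (qlt_get_tag() above draws the tag from the session's percpu_ida
     * pool; the tag indexes a qla_tgt_cmd pre-allocated in
     * se_sess->sess_cmd_map, so no command memory is allocated on this
     * path.)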
3231 */ 3232 cmd = qlt_get_tag(vha, sess, &op->atio); 3233 if (!cmd) { 3234 spin_lock_irqsave(&ha->hardware_lock, flags); 3235 qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY); 3236 ha->tgt.tgt_ops->put_sess(sess); 3237 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3238 kfree(op); 3239 return; 3240 } 3241 /* 3242 * __qlt_do_work() will call ha->tgt.tgt_ops->put_sess() to release 3243 * the extra reference taken above by qlt_make_local_sess() 3244 */ 3245 __qlt_do_work(cmd); 3246 kfree(op); 3247 return; 3248 3249 out_term: 3250 spin_lock_irqsave(&ha->hardware_lock, flags); 3251 qlt_send_term_exchange(vha, NULL, &op->atio, 1); 3252 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3253 kfree(op); 3254 3255 } 3256 3257 /* ha->hardware_lock supposed to be held on entry */ 3258 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, 3259 struct atio_from_isp *atio) 3260 { 3261 struct qla_hw_data *ha = vha->hw; 3262 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 3263 struct qla_tgt_sess *sess; 3264 struct qla_tgt_cmd *cmd; 3265 3266 if (unlikely(tgt->tgt_stop)) { 3267 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021, 3268 "New command while device %p is shutting down\n", tgt); 3269 return -EFAULT; 3270 } 3271 3272 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id); 3273 if (unlikely(!sess)) { 3274 struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op), 3275 GFP_ATOMIC); 3276 if (!op) 3277 return -ENOMEM; 3278 3279 memcpy(&op->atio, atio, sizeof(*atio)); 3280 INIT_WORK(&op->work, qlt_create_sess_from_atio); 3281 queue_work(qla_tgt_wq, &op->work); 3282 return 0; 3283 } 3284 /* 3285 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock. 3286 */ 3287 kref_get(&sess->se_sess->sess_kref); 3288 3289 cmd = qlt_get_tag(vha, sess, atio); 3290 if (!cmd) { 3291 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e, 3292 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx); 3293 ha->tgt.tgt_ops->put_sess(sess); 3294 return -ENOMEM; 3295 } 3296 3297 INIT_WORK(&cmd->work, qlt_do_work); 3298 queue_work(qla_tgt_wq, &cmd->work); 3299 return 0; 3300 3301 } 3302 3303 /* ha->hardware_lock supposed to be held on entry */ 3304 static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, 3305 int fn, void *iocb, int flags) 3306 { 3307 struct scsi_qla_host *vha = sess->vha; 3308 struct qla_hw_data *ha = vha->hw; 3309 struct qla_tgt_mgmt_cmd *mcmd; 3310 int res; 3311 uint8_t tmr_func; 3312 3313 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); 3314 if (!mcmd) { 3315 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009, 3316 "qla_target(%d): Allocation of management " 3317 "command failed, some commands and their data could " 3318 "leak\n", vha->vp_idx); 3319 return -ENOMEM; 3320 } 3321 memset(mcmd, 0, sizeof(*mcmd)); 3322 mcmd->sess = sess; 3323 3324 if (iocb) { 3325 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb, 3326 sizeof(mcmd->orig_iocb.imm_ntfy)); 3327 } 3328 mcmd->tmr_func = fn; 3329 mcmd->flags = flags; 3330 3331 switch (fn) { 3332 case QLA_TGT_CLEAR_ACA: 3333 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000, 3334 "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx); 3335 tmr_func = TMR_CLEAR_ACA; 3336 break; 3337 3338 case QLA_TGT_TARGET_RESET: 3339 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001, 3340 "qla_target(%d): TARGET_RESET received\n", 3341 sess->vha->vp_idx); 3342 tmr_func = TMR_TARGET_WARM_RESET; 3343 break; 3344 3345 case QLA_TGT_LUN_RESET: 3346 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002, 3347 "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx); 3348 tmr_func = TMR_LUN_RESET; 3349 break; 
3350 3351 case QLA_TGT_CLEAR_TS: 3352 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003, 3353 "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx); 3354 tmr_func = TMR_CLEAR_TASK_SET; 3355 break; 3356 3357 case QLA_TGT_ABORT_TS: 3358 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004, 3359 "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx); 3360 tmr_func = TMR_ABORT_TASK_SET; 3361 break; 3362 #if 0 3363 case QLA_TGT_ABORT_ALL: 3364 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005, 3365 "qla_target(%d): Doing ABORT_ALL_TASKS\n", 3366 sess->vha->vp_idx); 3367 tmr_func = 0; 3368 break; 3369 3370 case QLA_TGT_ABORT_ALL_SESS: 3371 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006, 3372 "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n", 3373 sess->vha->vp_idx); 3374 tmr_func = 0; 3375 break; 3376 3377 case QLA_TGT_NEXUS_LOSS_SESS: 3378 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007, 3379 "qla_target(%d): Doing NEXUS_LOSS_SESS\n", 3380 sess->vha->vp_idx); 3381 tmr_func = 0; 3382 break; 3383 3384 case QLA_TGT_NEXUS_LOSS: 3385 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008, 3386 "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx); 3387 tmr_func = 0; 3388 break; 3389 #endif 3390 default: 3391 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a, 3392 "qla_target(%d): Unknown task mgmt fn 0x%x\n", 3393 sess->vha->vp_idx, fn); 3394 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); 3395 return -ENOSYS; 3396 } 3397 3398 res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0); 3399 if (res != 0) { 3400 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b, 3401 "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n", 3402 sess->vha->vp_idx, res); 3403 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); 3404 return -EFAULT; 3405 } 3406 3407 return 0; 3408 } 3409 3410 /* ha->hardware_lock supposed to be held on entry */ 3411 static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) 3412 { 3413 struct atio_from_isp *a = (struct atio_from_isp *)iocb; 3414 struct qla_hw_data *ha = vha->hw; 3415 struct qla_tgt *tgt; 3416 struct qla_tgt_sess *sess; 3417 uint32_t lun, unpacked_lun; 3418 int lun_size, fn; 3419 3420 tgt = vha->vha_tgt.qla_tgt; 3421 3422 lun = a->u.isp24.fcp_cmnd.lun; 3423 lun_size = sizeof(a->u.isp24.fcp_cmnd.lun); 3424 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags; 3425 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, 3426 a->u.isp24.fcp_hdr.s_id); 3427 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); 3428 3429 if (!sess) { 3430 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024, 3431 "qla_target(%d): task mgmt fn 0x%x for " 3432 "non-existant session\n", vha->vp_idx, fn); 3433 return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb, 3434 sizeof(struct atio_from_isp)); 3435 } 3436 3437 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); 3438 } 3439 3440 /* ha->hardware_lock supposed to be held on entry */ 3441 static int __qlt_abort_task(struct scsi_qla_host *vha, 3442 struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess) 3443 { 3444 struct atio_from_isp *a = (struct atio_from_isp *)iocb; 3445 struct qla_hw_data *ha = vha->hw; 3446 struct qla_tgt_mgmt_cmd *mcmd; 3447 uint32_t lun, unpacked_lun; 3448 int rc; 3449 3450 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); 3451 if (mcmd == NULL) { 3452 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f, 3453 "qla_target(%d): %s: Allocation of ABORT cmd failed\n", 3454 vha->vp_idx, __func__); 3455 return -ENOMEM; 3456 } 3457 memset(mcmd, 0, sizeof(*mcmd)); 3458 3459 mcmd->sess = sess; 3460 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb, 3461 sizeof(mcmd->orig_iocb.imm_ntfy)); 3462 3463 lun = a->u.isp24.fcp_cmnd.lun; 3464 unpacked_lun = scsilun_to_int((struct 
        scsi_lun *)&lun);

    rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
        le16_to_cpu(iocb->u.isp2x.seq_id));
    if (rc != 0) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
            "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
            vha->vp_idx, rc);
        mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
        return -EFAULT;
    }

    return 0;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_abort_task(struct scsi_qla_host *vha,
    struct imm_ntfy_from_isp *iocb)
{
    struct qla_hw_data *ha = vha->hw;
    struct qla_tgt_sess *sess;
    int loop_id;

    loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);

    sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
    if (sess == NULL) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
            "qla_target(%d): task abort for non-existent "
            "session\n", vha->vp_idx);
        return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
            QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
    }

    return __qlt_abort_task(vha, iocb, sess);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
    struct imm_ntfy_from_isp *iocb)
{
    int res = 0;

    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
        "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
        vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);

    switch (iocb->u.isp24.status_subcode) {
    case ELS_PLOGI:
    case ELS_FLOGI:
    case ELS_PRLI:
    case ELS_LOGO:
    case ELS_PRLO:
        res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
        break;
    case ELS_PDISC:
    case ELS_ADISC:
    {
        struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
        if (tgt->link_reinit_iocb_pending) {
            qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
                0, 0, 0, 0, 0, 0);
            tgt->link_reinit_iocb_pending = 0;
        }
        res = 1; /* send notify ack */
        break;
    }

    default:
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
            "qla_target(%d): Unsupported ELS command %x "
            "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
        res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
        break;
    }

    return res;
}

static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
{
    struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
    size_t first_offset = 0, rem_offset = offset, tmp = 0;
    int i, sg_srr_cnt, bufflen = 0;

    ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023,
        "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, "
        "cmd->sg_cnt: %u, direction: %d\n",
        cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);

    /*
     * FIXME: Reject non-zero SRR relative offsets until we can test
     * this code properly.
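     *
     * (If enabled, the logic below would honor a non-zero offset by
     * splitting cmd->sg at the SRR relative offset: it locates the
     * scatterlist entry containing the offset and builds a fresh SGL
     * that begins partway into that entry.)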
3559 */ 3560 pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset); 3561 return -1; 3562 3563 if (!cmd->sg || !cmd->sg_cnt) { 3564 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055, 3565 "Missing cmd->sg or zero cmd->sg_cnt in" 3566 " qla_tgt_set_data_offset\n"); 3567 return -EINVAL; 3568 } 3569 /* 3570 * Walk the current cmd->sg list until we locate the new sg_srr_start 3571 */ 3572 for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) { 3573 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024, 3574 "sg[%d]: %p page: %p, length: %d, offset: %d\n", 3575 i, sg, sg_page(sg), sg->length, sg->offset); 3576 3577 if ((sg->length + tmp) > offset) { 3578 first_offset = rem_offset; 3579 sg_srr_start = sg; 3580 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025, 3581 "Found matching sg[%d], using %p as sg_srr_start, " 3582 "and using first_offset: %zu\n", i, sg, 3583 first_offset); 3584 break; 3585 } 3586 tmp += sg->length; 3587 rem_offset -= sg->length; 3588 } 3589 3590 if (!sg_srr_start) { 3591 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056, 3592 "Unable to locate sg_srr_start for offset: %u\n", offset); 3593 return -EINVAL; 3594 } 3595 sg_srr_cnt = (cmd->sg_cnt - i); 3596 3597 sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL); 3598 if (!sg_srr) { 3599 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057, 3600 "Unable to allocate sgp\n"); 3601 return -ENOMEM; 3602 } 3603 sg_init_table(sg_srr, sg_srr_cnt); 3604 sgp = &sg_srr[0]; 3605 /* 3606 * Walk the remaining list for sg_srr_start, mapping to the newly 3607 * allocated sg_srr taking first_offset into account. 3608 */ 3609 for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) { 3610 if (first_offset) { 3611 sg_set_page(sgp, sg_page(sg), 3612 (sg->length - first_offset), first_offset); 3613 first_offset = 0; 3614 } else { 3615 sg_set_page(sgp, sg_page(sg), sg->length, 0); 3616 } 3617 bufflen += sgp->length; 3618 3619 sgp = sg_next(sgp); 3620 if (!sgp) 3621 break; 3622 } 3623 3624 cmd->sg = sg_srr; 3625 cmd->sg_cnt = sg_srr_cnt; 3626 cmd->bufflen = bufflen; 3627 cmd->offset += offset; 3628 cmd->free_sg = 1; 3629 3630 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg); 3631 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n", 3632 cmd->sg_cnt); 3633 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n", 3634 cmd->bufflen); 3635 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n", 3636 cmd->offset); 3637 3638 if (cmd->sg_cnt < 0) 3639 BUG(); 3640 3641 if (cmd->bufflen < 0) 3642 BUG(); 3643 3644 return 0; 3645 } 3646 3647 static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd, 3648 uint32_t srr_rel_offs, int *xmit_type) 3649 { 3650 int res = 0, rel_offs; 3651 3652 rel_offs = srr_rel_offs - cmd->offset; 3653 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d", 3654 srr_rel_offs, rel_offs); 3655 3656 *xmit_type = QLA_TGT_XMIT_ALL; 3657 3658 if (rel_offs < 0) { 3659 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062, 3660 "qla_target(%d): SRR rel_offs (%d) < 0", 3661 cmd->vha->vp_idx, rel_offs); 3662 res = -1; 3663 } else if (rel_offs == cmd->bufflen) 3664 *xmit_type = QLA_TGT_XMIT_STATUS; 3665 else if (rel_offs > 0) 3666 res = qlt_set_data_offset(cmd, rel_offs); 3667 3668 return res; 3669 } 3670 3671 /* No locks, thread context */ 3672 static void qlt_handle_srr(struct scsi_qla_host *vha, 3673 struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm) 3674 { 3675 struct imm_ntfy_from_isp *ntfy = 3676 (struct imm_ntfy_from_isp *)&imm->imm_ntfy; 3677 struct qla_hw_data *ha = vha->hw; 3678 struct qla_tgt_cmd *cmd = sctio->cmd; 3679 struct se_cmd *se_cmd = 
&cmd->se_cmd; 3680 unsigned long flags; 3681 int xmit_type = 0, resp = 0; 3682 uint32_t offset; 3683 uint16_t srr_ui; 3684 3685 offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs); 3686 srr_ui = ntfy->u.isp24.srr_ui; 3687 3688 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n", 3689 cmd, srr_ui); 3690 3691 switch (srr_ui) { 3692 case SRR_IU_STATUS: 3693 spin_lock_irqsave(&ha->hardware_lock, flags); 3694 qlt_send_notify_ack(vha, ntfy, 3695 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); 3696 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3697 xmit_type = QLA_TGT_XMIT_STATUS; 3698 resp = 1; 3699 break; 3700 case SRR_IU_DATA_IN: 3701 if (!cmd->sg || !cmd->sg_cnt) { 3702 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063, 3703 "Unable to process SRR_IU_DATA_IN due to" 3704 " missing cmd->sg, state: %d\n", cmd->state); 3705 dump_stack(); 3706 goto out_reject; 3707 } 3708 if (se_cmd->scsi_status != 0) { 3709 ql_dbg(ql_dbg_tgt, vha, 0xe02a, 3710 "Rejecting SRR_IU_DATA_IN with non GOOD " 3711 "scsi_status\n"); 3712 goto out_reject; 3713 } 3714 cmd->bufflen = se_cmd->data_length; 3715 3716 if (qlt_has_data(cmd)) { 3717 if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0) 3718 goto out_reject; 3719 spin_lock_irqsave(&ha->hardware_lock, flags); 3720 qlt_send_notify_ack(vha, ntfy, 3721 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); 3722 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3723 resp = 1; 3724 } else { 3725 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064, 3726 "qla_target(%d): SRR for in data for cmd " 3727 "without them (tag %d, SCSI status %d), " 3728 "reject", vha->vp_idx, cmd->tag, 3729 cmd->se_cmd.scsi_status); 3730 goto out_reject; 3731 } 3732 break; 3733 case SRR_IU_DATA_OUT: 3734 if (!cmd->sg || !cmd->sg_cnt) { 3735 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065, 3736 "Unable to process SRR_IU_DATA_OUT due to" 3737 " missing cmd->sg\n"); 3738 dump_stack(); 3739 goto out_reject; 3740 } 3741 if (se_cmd->scsi_status != 0) { 3742 ql_dbg(ql_dbg_tgt, vha, 0xe02b, 3743 "Rejecting SRR_IU_DATA_OUT" 3744 " with non GOOD scsi_status\n"); 3745 goto out_reject; 3746 } 3747 cmd->bufflen = se_cmd->data_length; 3748 3749 if (qlt_has_data(cmd)) { 3750 if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0) 3751 goto out_reject; 3752 spin_lock_irqsave(&ha->hardware_lock, flags); 3753 qlt_send_notify_ack(vha, ntfy, 3754 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); 3755 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3756 if (xmit_type & QLA_TGT_XMIT_DATA) 3757 qlt_rdy_to_xfer(cmd); 3758 } else { 3759 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066, 3760 "qla_target(%d): SRR for out data for cmd " 3761 "without them (tag %d, SCSI status %d), " 3762 "reject", vha->vp_idx, cmd->tag, 3763 cmd->se_cmd.scsi_status); 3764 goto out_reject; 3765 } 3766 break; 3767 default: 3768 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067, 3769 "qla_target(%d): Unknown srr_ui value %x", 3770 vha->vp_idx, srr_ui); 3771 goto out_reject; 3772 } 3773 3774 /* Transmit response in case of status and data-in cases */ 3775 if (resp) 3776 qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status); 3777 3778 return; 3779 3780 out_reject: 3781 spin_lock_irqsave(&ha->hardware_lock, flags); 3782 qlt_send_notify_ack(vha, ntfy, 0, 0, 0, 3783 NOTIFY_ACK_SRR_FLAGS_REJECT, 3784 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, 3785 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); 3786 if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 3787 cmd->state = QLA_TGT_STATE_DATA_IN; 3788 dump_stack(); 3789 } else 3790 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); 3791 spin_unlock_irqrestore(&ha->hardware_lock, 
flags); 3792 } 3793 3794 static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha, 3795 struct qla_tgt_srr_imm *imm, int ha_locked) 3796 { 3797 struct qla_hw_data *ha = vha->hw; 3798 unsigned long flags = 0; 3799 3800 if (!ha_locked) 3801 spin_lock_irqsave(&ha->hardware_lock, flags); 3802 3803 qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0, 3804 NOTIFY_ACK_SRR_FLAGS_REJECT, 3805 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, 3806 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); 3807 3808 if (!ha_locked) 3809 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3810 3811 kfree(imm); 3812 } 3813 3814 static void qlt_handle_srr_work(struct work_struct *work) 3815 { 3816 struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work); 3817 struct scsi_qla_host *vha = tgt->vha; 3818 struct qla_tgt_srr_ctio *sctio; 3819 unsigned long flags; 3820 3821 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n", 3822 tgt); 3823 3824 restart: 3825 spin_lock_irqsave(&tgt->srr_lock, flags); 3826 list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) { 3827 struct qla_tgt_srr_imm *imm, *i, *ti; 3828 struct qla_tgt_cmd *cmd; 3829 struct se_cmd *se_cmd; 3830 3831 imm = NULL; 3832 list_for_each_entry_safe(i, ti, &tgt->srr_imm_list, 3833 srr_list_entry) { 3834 if (i->srr_id == sctio->srr_id) { 3835 list_del(&i->srr_list_entry); 3836 if (imm) { 3837 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068, 3838 "qla_target(%d): There must be " 3839 "only one IMM SRR per CTIO SRR " 3840 "(IMM SRR %p, id %d, CTIO %p\n", 3841 vha->vp_idx, i, i->srr_id, sctio); 3842 qlt_reject_free_srr_imm(tgt->vha, i, 0); 3843 } else 3844 imm = i; 3845 } 3846 } 3847 3848 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a, 3849 "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio, 3850 sctio->srr_id); 3851 3852 if (imm == NULL) { 3853 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b, 3854 "Not found matching IMM for SRR CTIO (id %d)\n", 3855 sctio->srr_id); 3856 continue; 3857 } else 3858 list_del(&sctio->srr_list_entry); 3859 3860 spin_unlock_irqrestore(&tgt->srr_lock, flags); 3861 3862 cmd = sctio->cmd; 3863 /* 3864 * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow 3865 * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in() 3866 * logic.. 3867 */ 3868 cmd->offset = 0; 3869 if (cmd->free_sg) { 3870 kfree(cmd->sg); 3871 cmd->sg = NULL; 3872 cmd->free_sg = 0; 3873 } 3874 se_cmd = &cmd->se_cmd; 3875 3876 cmd->sg_cnt = se_cmd->t_data_nents; 3877 cmd->sg = se_cmd->t_data_sg; 3878 3879 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c, 3880 "SRR cmd %p (se_cmd %p, tag %d, op %x), " 3881 "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag, 3882 se_cmd->t_task_cdb ? 
se_cmd->t_task_cdb[0] : 0, 3883 cmd->sg_cnt, cmd->offset); 3884 3885 qlt_handle_srr(vha, sctio, imm); 3886 3887 kfree(imm); 3888 kfree(sctio); 3889 goto restart; 3890 } 3891 spin_unlock_irqrestore(&tgt->srr_lock, flags); 3892 } 3893 3894 /* ha->hardware_lock supposed to be held on entry */ 3895 static void qlt_prepare_srr_imm(struct scsi_qla_host *vha, 3896 struct imm_ntfy_from_isp *iocb) 3897 { 3898 struct qla_tgt_srr_imm *imm; 3899 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 3900 struct qla_tgt_srr_ctio *sctio; 3901 3902 tgt->imm_srr_id++; 3903 3904 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02d, "qla_target(%d): SRR received\n", 3905 vha->vp_idx); 3906 3907 imm = kzalloc(sizeof(*imm), GFP_ATOMIC); 3908 if (imm != NULL) { 3909 memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy)); 3910 3911 /* IRQ is already OFF */ 3912 spin_lock(&tgt->srr_lock); 3913 imm->srr_id = tgt->imm_srr_id; 3914 list_add_tail(&imm->srr_list_entry, 3915 &tgt->srr_imm_list); 3916 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e, 3917 "IMM NTFY SRR %p added (id %d, ui %x)\n", 3918 imm, imm->srr_id, iocb->u.isp24.srr_ui); 3919 if (tgt->imm_srr_id == tgt->ctio_srr_id) { 3920 int found = 0; 3921 list_for_each_entry(sctio, &tgt->srr_ctio_list, 3922 srr_list_entry) { 3923 if (sctio->srr_id == imm->srr_id) { 3924 found = 1; 3925 break; 3926 } 3927 } 3928 if (found) { 3929 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s", 3930 "Scheduling srr work\n"); 3931 schedule_work(&tgt->srr_work); 3932 } else { 3933 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030, 3934 "qla_target(%d): imm_srr_id " 3935 "== ctio_srr_id (%d), but there is no " 3936 "corresponding SRR CTIO, deleting IMM " 3937 "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id, 3938 imm); 3939 list_del(&imm->srr_list_entry); 3940 3941 kfree(imm); 3942 3943 spin_unlock(&tgt->srr_lock); 3944 goto out_reject; 3945 } 3946 } 3947 spin_unlock(&tgt->srr_lock); 3948 } else { 3949 struct qla_tgt_srr_ctio *ts; 3950 3951 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069, 3952 "qla_target(%d): Unable to allocate SRR IMM " 3953 "entry, SRR request will be rejected\n", vha->vp_idx); 3954 3955 /* IRQ is already OFF */ 3956 spin_lock(&tgt->srr_lock); 3957 list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list, 3958 srr_list_entry) { 3959 if (sctio->srr_id == tgt->imm_srr_id) { 3960 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031, 3961 "CTIO SRR %p deleted (id %d)\n", 3962 sctio, sctio->srr_id); 3963 list_del(&sctio->srr_list_entry); 3964 qlt_send_term_exchange(vha, sctio->cmd, 3965 &sctio->cmd->atio, 1); 3966 kfree(sctio); 3967 } 3968 } 3969 spin_unlock(&tgt->srr_lock); 3970 goto out_reject; 3971 } 3972 3973 return; 3974 3975 out_reject: 3976 qlt_send_notify_ack(vha, iocb, 0, 0, 0, 3977 NOTIFY_ACK_SRR_FLAGS_REJECT, 3978 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, 3979 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); 3980 } 3981 3982 /* 3983 * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reacquire.
 */
static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t add_flags = 0;
	int send_notify_ack = 1;
	uint16_t status;

	status = le16_to_cpu(iocb->u.isp2x.status);
	switch (status) {
	case IMM_NTFY_LIP_RESET:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
		    "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
		    vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_LIP_LINK_REINIT:
	{
		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
		    "qla_target(%d): LINK REINIT (loop %#x, "
		    "subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
		}
		memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
		tgt->link_reinit_iocb_pending = 1;
		/*
		 * QLogic requires waiting after LINK REINIT for possible
		 * PDISC or ADISC ELS commands
		 */
		send_notify_ack = 0;
		break;
	}

	case IMM_NTFY_PORT_LOGOUT:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
		    "qla_target(%d): Port logout (loop "
		    "%#x, subcode %x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp24.nport_handle),
		    iocb->u.isp24.status_subcode);

		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_TPRLO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
		    "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_PORT_CONFIG:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
		    "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
		    status);
		if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
			send_notify_ack = 0;
		/* The sessions will be cleared in the callback, if needed */
		break;

	case IMM_NTFY_GLBL_LOGO:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
		    "qla_target(%d): Link failure detected\n",
		    vha->vp_idx);
		/* I_T nexus loss */
		if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_IOCB_OVERFLOW:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
		    "qla_target(%d): Cannot provide requested "
		    "capability (IOCB overflowed the immediate notify "
		    "resource count)\n", vha->vp_idx);
		break;

	case IMM_NTFY_ABORT_TASK:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
		    "qla_target(%d): Abort Task (S %08x I %#x -> "
		    "L %#x)\n", vha->vp_idx,
		    le16_to_cpu(iocb->u.isp2x.seq_id),
		    GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
		    le16_to_cpu(iocb->u.isp2x.lun));
		if (qlt_abort_task(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_RESOURCE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
		    "qla_target(%d): Out of resources, host %ld\n",
		    vha->vp_idx, vha->host_no);
		break;

	case IMM_NTFY_MSG_RX:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
		    "qla_target(%d): Immediate notify task %x\n",
		    vha->vp_idx, iocb->u.isp2x.task_flags);
		if (qlt_handle_task_mgmt(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_ELS:
		if (qlt_24xx_handle_els(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_SRR:
		qlt_prepare_srr_imm(vha, iocb);
		send_notify_ack = 0;
		break;

	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
		    "qla_target(%d): Received unknown immediate "
		    "notify status %x\n", vha->vp_idx, status);
		break;
	}

	if (send_notify_ack)
		qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire. This function sends busy to ISP 2xxx or 24xx.
 */
static void qlt_send_busy(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status)
{
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct qla_tgt_sess *sess = NULL;

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    atio->u.isp24.fcp_hdr.s_id);
	if (!sess) {
		qlt_send_term_exchange(vha, NULL, atio, 1);
		return;
	}
	/* Sending marker isn't necessary, since we are called from ISR */

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06e,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = sess->loop_id;
	ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
	    __constant_cpu_to_le16(
		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
		CTIO7_FLAGS_DONT_RET_CTIO);
	/*
	 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
	 * if the explicit confirmation is used.
	 */
	ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	ctio24->u.status1.scsi_status = cpu_to_le16(status);
	ctio24->u.status1.residual = get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
		atio->u.isp24.fcp_cmnd.add_cdb_len]);
	if (ctio24->u.status1.residual != 0)
		ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;

	qla2x00_start_iocbs(vha, vha->req);
}

/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	int rc;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf039,
		    "ATIO pkt, but no tgt (ha %p)", ha);
		return;
	}
	ql_dbg(ql_dbg_tgt, vha, 0xe02c,
	    "qla_target(%d): ATIO pkt %p: type %02x count %02x",
	    vha->vp_idx, atio, atio->u.raw.entry_type,
	    atio->u.raw.entry_count);
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	tgt->irq_cmd_count++;

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
		ql_dbg(ql_dbg_tgt, vha, 0xe02d,
		    "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, cdb %x, add_cdb_len %x, data_length %04x, s_id %02x%02x%02x\n",
		    vha->vp_idx, atio->u.isp24.fcp_cmnd.lun,
		    atio->u.isp24.fcp_cmnd.rddata,
		    atio->u.isp24.fcp_cmnd.wrdata,
		    atio->u.isp24.fcp_cmnd.cdb[0],
		    atio->u.isp24.fcp_cmnd.add_cdb_len,
		    be32_to_cpu(get_unaligned((uint32_t *)
			&atio->u.isp24.fcp_cmnd.add_cdb[
			    atio->u.isp24.fcp_cmnd.add_cdb_len])),
		    atio->u.isp24.fcp_hdr.s_id[0],
		    atio->u.isp24.fcp_hdr.s_id[1],
		    atio->u.isp24.fcp_hdr.s_id[2]);

		if (unlikely(atio->u.isp24.exchange_addr ==
		    ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe058,
			    "qla_target(%d): ATIO_TYPE7 "
			    "received with UNKNOWN exchange address, "
			    "sending QUEUE_FULL\n", vha->vp_idx);
			qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
			break;
		}
		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0))
			rc = qlt_handle_cmd_for_atio(vha, atio);
		else
			rc = qlt_handle_task_mgmt(vha, atio);
		if (unlikely(rc != 0)) {
			if (rc == -ESRCH) {
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
				qlt_send_busy(vha, atio, SAM_STAT_BUSY);
#else
				qlt_send_term_exchange(vha, NULL, atio, 1);
#endif
			} else {
				if (tgt->tgt_stop) {
					ql_dbg(ql_dbg_tgt, vha, 0xe059,
					    "qla_target: Unable to send "
					    "command to target for req, "
					    "ignoring.\n");
				} else {
					ql_dbg(ql_dbg_tgt, vha, 0xe05a,
					    "qla_target(%d): Unable to send "
					    "command to target, sending BUSY "
					    "status.\n", vha->vp_idx);
					qlt_send_busy(vha, atio, SAM_STAT_BUSY);
				}
			}
		}
		break;

	case IMMED_NOTIFY_TYPE:
	{
		if (unlikely(atio->u.isp2x.entry_status != 0)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05b,
			    "qla_target(%d): Received ATIO packet %x "
			    "with error status %x\n", vha->vp_idx,
			    atio->u.raw.entry_type,
			    atio->u.isp2x.entry_status);
			break;
		}
		ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe05c,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	tgt->irq_cmd_count--;
}

/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe05d,
		    "qla_target(%d): Response pkt %x received, but no "
		    "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
		return;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe02f,
	    "qla_target(%d): response pkt %p: T %02x C %02x S %02x "
	    "handle %#x\n", vha->vp_idx, pkt, pkt->entry_type,
	    pkt->entry_count, pkt->entry_status, pkt->handle);

	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
4304 */ 4305 4306 tgt->irq_cmd_count++; 4307 4308 switch (pkt->entry_type) { 4309 case CTIO_CRC2: 4310 case CTIO_TYPE7: 4311 { 4312 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; 4313 ql_dbg(ql_dbg_tgt, vha, 0xe030, 4314 "CTIO[0x%x] 12/CTIO7 7A/CRC2: instance %d\n", 4315 entry->entry_type, vha->vp_idx); 4316 qlt_do_ctio_completion(vha, entry->handle, 4317 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 4318 entry); 4319 break; 4320 } 4321 4322 case ACCEPT_TGT_IO_TYPE: 4323 { 4324 struct atio_from_isp *atio = (struct atio_from_isp *)pkt; 4325 int rc; 4326 ql_dbg(ql_dbg_tgt, vha, 0xe031, 4327 "ACCEPT_TGT_IO instance %d status %04x " 4328 "lun %04x read/write %d data_length %04x " 4329 "target_id %02x rx_id %04x\n ", vha->vp_idx, 4330 le16_to_cpu(atio->u.isp2x.status), 4331 le16_to_cpu(atio->u.isp2x.lun), 4332 atio->u.isp2x.execution_codes, 4333 le32_to_cpu(atio->u.isp2x.data_length), GET_TARGET_ID(ha, 4334 atio), atio->u.isp2x.rx_id); 4335 if (atio->u.isp2x.status != 4336 __constant_cpu_to_le16(ATIO_CDB_VALID)) { 4337 ql_dbg(ql_dbg_tgt, vha, 0xe05e, 4338 "qla_target(%d): ATIO with error " 4339 "status %x received\n", vha->vp_idx, 4340 le16_to_cpu(atio->u.isp2x.status)); 4341 break; 4342 } 4343 ql_dbg(ql_dbg_tgt, vha, 0xe032, 4344 "FCP CDB: 0x%02x, sizeof(cdb): %lu", 4345 atio->u.isp2x.cdb[0], (unsigned long 4346 int)sizeof(atio->u.isp2x.cdb)); 4347 4348 rc = qlt_handle_cmd_for_atio(vha, atio); 4349 if (unlikely(rc != 0)) { 4350 if (rc == -ESRCH) { 4351 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ 4352 qlt_send_busy(vha, atio, 0); 4353 #else 4354 qlt_send_term_exchange(vha, NULL, atio, 1); 4355 #endif 4356 } else { 4357 if (tgt->tgt_stop) { 4358 ql_dbg(ql_dbg_tgt, vha, 0xe05f, 4359 "qla_target: Unable to send " 4360 "command to target, sending TERM " 4361 "EXCHANGE for rsp\n"); 4362 qlt_send_term_exchange(vha, NULL, 4363 atio, 1); 4364 } else { 4365 ql_dbg(ql_dbg_tgt, vha, 0xe060, 4366 "qla_target(%d): Unable to send " 4367 "command to target, sending BUSY " 4368 "status\n", vha->vp_idx); 4369 qlt_send_busy(vha, atio, 0); 4370 } 4371 } 4372 } 4373 } 4374 break; 4375 4376 case CONTINUE_TGT_IO_TYPE: 4377 { 4378 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; 4379 ql_dbg(ql_dbg_tgt, vha, 0xe033, 4380 "CONTINUE_TGT_IO: instance %d\n", vha->vp_idx); 4381 qlt_do_ctio_completion(vha, entry->handle, 4382 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 4383 entry); 4384 break; 4385 } 4386 4387 case CTIO_A64_TYPE: 4388 { 4389 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; 4390 ql_dbg(ql_dbg_tgt, vha, 0xe034, "CTIO_A64: instance %d\n", 4391 vha->vp_idx); 4392 qlt_do_ctio_completion(vha, entry->handle, 4393 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 4394 entry); 4395 break; 4396 } 4397 4398 case IMMED_NOTIFY_TYPE: 4399 ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n"); 4400 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt); 4401 break; 4402 4403 case NOTIFY_ACK_TYPE: 4404 if (tgt->notify_ack_expected > 0) { 4405 struct nack_to_isp *entry = (struct nack_to_isp *)pkt; 4406 ql_dbg(ql_dbg_tgt, vha, 0xe036, 4407 "NOTIFY_ACK seq %08x status %x\n", 4408 le16_to_cpu(entry->u.isp2x.seq_id), 4409 le16_to_cpu(entry->u.isp2x.status)); 4410 tgt->notify_ack_expected--; 4411 if (entry->u.isp2x.status != 4412 __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) { 4413 ql_dbg(ql_dbg_tgt, vha, 0xe061, 4414 "qla_target(%d): NOTIFY_ACK " 4415 "failed %x\n", vha->vp_idx, 4416 le16_to_cpu(entry->u.isp2x.status)); 4417 } 4418 } else { 4419 
			ql_dbg(ql_dbg_tgt, vha, 0xe062,
			    "qla_target(%d): Unexpected NOTIFY_ACK received\n",
			    vha->vp_idx);
		}
		break;

	case ABTS_RECV_24XX:
		ql_dbg(ql_dbg_tgt, vha, 0xe037,
		    "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
		qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
		break;

	case ABTS_RESP_24XX:
		if (tgt->abts_resp_expected > 0) {
			struct abts_resp_from_24xx_fw *entry =
			    (struct abts_resp_from_24xx_fw *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe038,
			    "ABTS_RESP_24XX: compl_status %x\n",
			    entry->compl_status);
			tgt->abts_resp_expected--;
			if (le16_to_cpu(entry->compl_status) !=
			    ABTS_RESP_COMPL_SUCCESS) {
				if ((entry->error_subcode1 == 0x1E) &&
				    (entry->error_subcode2 == 0)) {
					/*
					 * We've got a race here: aborted
					 * exchange not terminated, i.e.
					 * response for the aborted command was
					 * sent between the abort request was
					 * received and processed.
					 * Unfortunately, the firmware has a
					 * silly requirement that all aborted
					 * exchanges must be explicitly
					 * terminated, otherwise it refuses to
					 * send responses for the abort
					 * requests. So, we have to
					 * (re)terminate the exchange and retry
					 * the abort response.
					 */
					qlt_24xx_retry_term_exchange(vha,
					    entry);
				} else
					ql_dbg(ql_dbg_tgt, vha, 0xe063,
					    "qla_target(%d): ABTS_RESP_24XX "
					    "failed %x (subcode %x:%x)",
					    vha->vp_idx, entry->compl_status,
					    entry->error_subcode1,
					    entry->error_subcode2);
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe064,
			    "qla_target(%d): Unexpected ABTS_RESP_24XX "
			    "received\n", vha->vp_idx);
		}
		break;

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe065,
		    "qla_target(%d): Received unknown response pkt "
		    "type %x\n", vha->vp_idx, pkt->entry_type);
		break;
	}

	tgt->irq_cmd_count--;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
	uint16_t *mailbox)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	int login_code;

	ql_dbg(ql_dbg_tgt, vha, 0xe039,
	    "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
	    vha->host_no, atomic_read(&vha->loop_state), vha->flags.init_done,
	    ha->operating_mode, ha->current_topology);

	if (!ha->tgt.tgt_ops)
		return;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe03a,
		    "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
		return;
	}

	if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
	    IS_QLA2100(ha))
		return;
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	tgt->irq_cmd_count++;

	switch (code) {
	case MBA_RESET:			/* Reset */
	case MBA_SYSTEM_ERR:		/* System Error */
	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
		    "qla_target(%d): System error async event %#x "
		    "occurred", vha->vp_idx, code);
		break;
	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up. 
*/ 4529 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 4530 break; 4531 4532 case MBA_LOOP_UP: 4533 { 4534 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b, 4535 "qla_target(%d): Async LOOP_UP occurred " 4536 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, 4537 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), 4538 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); 4539 if (tgt->link_reinit_iocb_pending) { 4540 qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb, 4541 0, 0, 0, 0, 0, 0); 4542 tgt->link_reinit_iocb_pending = 0; 4543 } 4544 break; 4545 } 4546 4547 case MBA_LIP_OCCURRED: 4548 case MBA_LOOP_DOWN: 4549 case MBA_LIP_RESET: 4550 case MBA_RSCN_UPDATE: 4551 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c, 4552 "qla_target(%d): Async event %#x occurred " 4553 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code, 4554 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), 4555 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); 4556 break; 4557 4558 case MBA_PORT_UPDATE: 4559 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d, 4560 "qla_target(%d): Port update async event %#x " 4561 "occurred: updating the ports database (m[0]=%x, m[1]=%x, " 4562 "m[2]=%x, m[3]=%x)", vha->vp_idx, code, 4563 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), 4564 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); 4565 4566 login_code = le16_to_cpu(mailbox[2]); 4567 if (login_code == 0x4) 4568 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e, 4569 "Async MB 2: Got PLOGI Complete\n"); 4570 else if (login_code == 0x7) 4571 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f, 4572 "Async MB 2: Port Logged Out\n"); 4573 break; 4574 4575 default: 4576 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040, 4577 "qla_target(%d): Async event %#x occurred: " 4578 "ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, 4579 code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), 4580 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); 4581 break; 4582 } 4583 4584 tgt->irq_cmd_count--; 4585 } 4586 4587 static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, 4588 uint16_t loop_id) 4589 { 4590 fc_port_t *fcport; 4591 int rc; 4592 4593 fcport = kzalloc(sizeof(*fcport), GFP_KERNEL); 4594 if (!fcport) { 4595 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f, 4596 "qla_target(%d): Allocation of tmp FC port failed", 4597 vha->vp_idx); 4598 return NULL; 4599 } 4600 4601 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf041, "loop_id %d", loop_id); 4602 4603 fcport->loop_id = loop_id; 4604 4605 rc = qla2x00_get_port_database(vha, fcport, 0); 4606 if (rc != QLA_SUCCESS) { 4607 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070, 4608 "qla_target(%d): Failed to retrieve fcport " 4609 "information -- get_port_database() returned %x " 4610 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id); 4611 kfree(fcport); 4612 return NULL; 4613 } 4614 4615 return fcport; 4616 } 4617 4618 /* Must be called under tgt_mutex */ 4619 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha, 4620 uint8_t *s_id) 4621 { 4622 struct qla_tgt_sess *sess = NULL; 4623 fc_port_t *fcport = NULL; 4624 int rc, global_resets; 4625 uint16_t loop_id = 0; 4626 4627 retry: 4628 global_resets = 4629 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count); 4630 4631 rc = qla24xx_get_loop_id(vha, s_id, &loop_id); 4632 if (rc != 0) { 4633 if ((s_id[0] == 0xFF) && 4634 (s_id[1] == 0xFC)) { 4635 /* 4636 * This is Domain Controller, so it should be 4637 * OK to drop SCSI commands from it. 
			 */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
			    "Unable to find initiator with S_ID %x:%x:%x",
			    s_id[0], s_id[1], s_id[2]);
		} else
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071,
			    "qla_target(%d): Unable to find "
			    "initiator with S_ID %x:%x:%x",
			    vha->vp_idx, s_id[0], s_id[1],
			    s_id[2]);
		return NULL;
	}

	fcport = qlt_get_port_database(vha, loop_id);
	if (!fcport)
		return NULL;

	if (global_resets !=
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
		    "qla_target(%d): global reset during session discovery "
		    "(counter was %d, new %d), retrying", vha->vp_idx,
		    global_resets,
		    atomic_read(&vha->vha_tgt.
			qla_tgt->tgt_global_resets_count));
		goto retry;
	}

	sess = qlt_create_sess(vha, fcport, true);

	kfree(fcport);
	return sess;
}

static void qlt_abort_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	unsigned long flags;
	uint8_t s_id[3];
	int rc;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (tgt->tgt_stop)
		goto out_term;

	/* Convert the little-endian ABTS S_ID to wire (big-endian) order */
	s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
	s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
	s_id[2] = prm->abts.fcp_hdr_le.s_id[0];

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (!sess)
			goto out_term;
	} else {
		kref_get(&sess->se_sess->sess_kref);
	}

	if (tgt->tgt_stop)
		goto out_term;

	rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
	if (rc != 0)
		goto out_term;

	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return;

out_term:
	qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void qlt_tmr_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct atio_from_isp *a = &prm->tm_iocb2;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	unsigned long flags;
	uint8_t *s_id = NULL; /* to hide compiler warnings */
	int rc;
	uint32_t lun, unpacked_lun;
	int lun_size, fn;
	void *iocb;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (tgt->tgt_stop)
		goto out_term;

	s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (!sess)
			goto out_term;
	} else {
		kref_get(&sess->se_sess->sess_kref);
	}

	iocb = a;
	lun = a->u.isp24.fcp_cmnd.lun;
	lun_size = sizeof(lun);
	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
	if (rc != 0)
		goto out_term;

	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return;

out_term:
	qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void qlt_sess_work_fn(struct work_struct *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
	struct scsi_qla_host *vha = tgt->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		struct qla_tgt_sess_work_param *prm = list_entry(
		    tgt->sess_works_list.next, typeof(*prm),
		    sess_works_list_entry);

		/*
		 * This work can be scheduled on several CPUs at a time, so we
		 * must delete the entry to eliminate double processing
		 */
		list_del(&prm->sess_works_list_entry);

		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

		switch (prm->type) {
		case QLA_TGT_SESS_WORK_ABORT:
			qlt_abort_work(tgt, prm);
			break;
		case QLA_TGT_SESS_WORK_TM:
			qlt_tmr_work(tgt, prm);
			break;
		default:
			BUG_ON(1);
			break;
		}

		spin_lock_irqsave(&tgt->sess_work_lock, flags);

		kfree(prm);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
}

/* Must be called under tgt_host_action_mutex */
int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
{
	struct qla_tgt *tgt;

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	if (!IS_TGT_MODE_CAPABLE(ha)) {
		ql_log(ql_log_warn, base_vha, 0xe070,
		    "This adapter does not support target mode.\n");
		return 0;
	}

	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
	    "Registering target for host %ld(%p).\n", base_vha->host_no, ha);

	BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);

	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
	if (!tgt) {
		ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
		    "Unable to allocate struct qla_tgt\n");
		return -ENOMEM;
	}

	if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
		base_vha->host->hostt->supported_mode |= MODE_TARGET;

	tgt->ha = ha;
	tgt->vha = base_vha;
	init_waitqueue_head(&tgt->waitQ);
	INIT_LIST_HEAD(&tgt->sess_list);
	INIT_LIST_HEAD(&tgt->del_sess_list);
	INIT_DELAYED_WORK(&tgt->sess_del_work,
	    (void (*)(struct work_struct *))qlt_del_sess_work_fn);
	spin_lock_init(&tgt->sess_work_lock);
	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
	INIT_LIST_HEAD(&tgt->sess_works_list);
	spin_lock_init(&tgt->srr_lock);
	INIT_LIST_HEAD(&tgt->srr_ctio_list);
	INIT_LIST_HEAD(&tgt->srr_imm_list);
	INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
	atomic_set(&tgt->tgt_global_resets_count, 0);

	base_vha->vha_tgt.qla_tgt = tgt;

	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
	    "qla_target(%d): using 64 Bit PCI addressing",
	    base_vha->vp_idx);
	tgt->tgt_enable_64bit_addr = 1;
	/* 3 is reserved */
	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
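	/*
	 * Data segments carried by the CTIO IOCB itself vs. by each
	 * continuation IOCB that follows it.
	 */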
	tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
	tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;

	if (base_vha->fc_vport)
		return 0;

	mutex_lock(&qla_tgt_mutex);
	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
	mutex_unlock(&qla_tgt_mutex);

	return 0;
}

/* Must be called under tgt_host_action_mutex */
int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
{
	if (!vha->vha_tgt.qla_tgt)
		return 0;

	if (vha->fc_vport) {
		qlt_release(vha->vha_tgt.qla_tgt);
		return 0;
	}
	mutex_lock(&qla_tgt_mutex);
	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
	    vha->host_no, ha);
	qlt_release(vha->vha_tgt.qla_tgt);

	return 0;
}

static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
	unsigned char *b)
{
	int i;

	pr_debug("qla2xxx HW vha->node_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->node_name[i]);
	pr_debug("\n");
	pr_debug("qla2xxx HW vha->port_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->port_name[i]);
	pr_debug("\n");

	pr_debug("qla2xxx passed configfs WWPN: ");
	put_unaligned_be64(wwpn, b);
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", b[i]);
	pr_debug("\n");
}

/**
 * qlt_lport_register - register lport with external module
 *
 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
 * @phys_wwpn: passed FC target WWPN for the physical port
 * @npiv_wwpn: passed NPIV WWPN, or 0 for a physical port
 * @npiv_wwnn: passed NPIV WWNN, or 0 for a physical port
 * @callback: lport initialization callback for tcm_qla2xxx code
 */
int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
	u64 npiv_wwpn, u64 npiv_wwnn,
	int (*callback)(struct scsi_qla_host *, void *, u64, u64))
{
	struct qla_tgt *tgt;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct Scsi_Host *host;
	unsigned long flags;
	int rc;
	u8 b[WWN_SIZE];

	mutex_lock(&qla_tgt_mutex);
	list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
		vha = tgt->vha;
		ha = vha->hw;

		host = vha->host;
		if (!host)
			continue;

		if (!(host->hostt->supported_mode & MODE_TARGET))
			continue;

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if ((!npiv_wwpn || !npiv_wwnn) &&
		    host->active_mode & MODE_TARGET) {
			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		if (tgt->tgt_stop) {
			pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		if (!scsi_host_get(host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe068,
			    "Unable to scsi_host_get() for"
			    " qla2xxx scsi_host\n");
			continue;
		}
		qlt_lport_dump(vha, phys_wwpn, b);

		if (memcmp(vha->port_name, b, WWN_SIZE)) {
			scsi_host_put(host);
			continue;
		}
		rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
		if (rc != 0)
			scsi_host_put(host);

		mutex_unlock(&qla_tgt_mutex);
		return rc;
	}
	mutex_unlock(&qla_tgt_mutex);

	return -ENODEV;
}
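/*
 * Illustrative sketch (not compiled): roughly how a fabric module such as
 * tcm_qla2xxx is expected to use qlt_lport_register() for a physical port.
 * The example_* names are hypothetical, not the actual tcm_qla2xxx code.
 */
#if 0
static int example_lport_cb(struct scsi_qla_host *vha, void *target_lport_ptr,
	u64 npiv_wwpn, u64 npiv_wwnn)
{
	/* Bind fabric-private lport state to the matched vha here. */
	return 0;
}

static int example_make_lport(void *lport, u64 phys_wwpn)
{
	/* Walks qla_tgt_glist for the host whose port_name matches. */
	return qlt_lport_register(lport, phys_wwpn, 0, 0, example_lport_cb);
}
#endif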
EXPORT_SYMBOL(qlt_lport_register);

/**
 * qlt_lport_deregister - Deregister lport
 *
 * @vha: Registered scsi_qla_host pointer
 */
void qlt_lport_deregister(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct Scsi_Host *sh = vha->host;
	/*
	 * Clear the target_lport_ptr and the qla_target_template
	 * pointer (tgt_ops)
	 */
	vha->vha_tgt.target_lport_ptr = NULL;
	ha->tgt.tgt_ops = NULL;
	/*
	 * Release the Scsi_Host reference for the underlying qla2xxx host
	 */
	scsi_host_put(sh);
}
EXPORT_SYMBOL(qlt_lport_deregister);

/* Must be called under HW lock */
void qlt_set_mode(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_TARGET;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode |= MODE_TARGET;
		break;
	default:
		break;
	}

	if (ha->tgt.ini_mode_force_reverse)
		qla_reverse_ini_mode(vha);
}

/* Must be called under HW lock */
void qlt_clear_mode(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
		vha->host->active_mode = MODE_UNKNOWN;
		break;
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode &= ~MODE_TARGET;
		break;
	default:
		break;
	}

	if (ha->tgt.ini_mode_force_reverse)
		qla_reverse_ini_mode(vha);
}

/*
 * qlt_enable_vha - NO LOCK HELD
 *
 * host_reset, bring up w/ Target Mode Enabled
 */
void
qlt_enable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe069,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stopped = 0;
	qlt_set_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (vha->vp_idx) {
		qla24xx_disable_vp(vha);
		qla24xx_enable_vp(vha);
	} else {
		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
		qla2xxx_wake_dpc(base_vha);
		qla2x00_wait_for_hba_online(base_vha);
	}
}
EXPORT_SYMBOL(qlt_enable_vha);

/*
 * qlt_disable_vha - NO LOCK HELD
 *
 * Disable Target Mode and reset the adapter
 */
void
qlt_disable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe06a,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_clear_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_hba_online(vha);
}

/*
 * Called from qla_init.c:qla24xx_vport_create() context to set up
 * the target mode specific 
struct scsi_qla_host and struct qla_hw_data 5139 * members. 5140 */ 5141 void 5142 qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha) 5143 { 5144 if (!qla_tgt_mode_enabled(vha)) 5145 return; 5146 5147 vha->vha_tgt.qla_tgt = NULL; 5148 5149 mutex_init(&vha->vha_tgt.tgt_mutex); 5150 mutex_init(&vha->vha_tgt.tgt_host_action_mutex); 5151 5152 qlt_clear_mode(vha); 5153 5154 /* 5155 * NOTE: Currently the value is kept the same for <24xx and 5156 * >=24xx ISPs. If it is necessary to change it, 5157 * the check should be added for specific ISPs, 5158 * assigning the value appropriately. 5159 */ 5160 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 5161 5162 qlt_add_target(ha, vha); 5163 } 5164 5165 void 5166 qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req) 5167 { 5168 /* 5169 * FC-4 Feature bit 0 indicates target functionality to the name server. 5170 */ 5171 if (qla_tgt_mode_enabled(vha)) { 5172 if (qla_ini_mode_enabled(vha)) 5173 ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1; 5174 else 5175 ct_req->req.rff_id.fc4_feature = BIT_0; 5176 } else if (qla_ini_mode_enabled(vha)) { 5177 ct_req->req.rff_id.fc4_feature = BIT_1; 5178 } 5179 } 5180 5181 /* 5182 * qlt_init_atio_q_entries() - Initializes ATIO queue entries. 5183 * @ha: HA context 5184 * 5185 * Beginning of ATIO ring has initialization control block already built 5186 * by nvram config routine. 5187 * 5188 * Returns 0 on success. 5189 */ 5190 void 5191 qlt_init_atio_q_entries(struct scsi_qla_host *vha) 5192 { 5193 struct qla_hw_data *ha = vha->hw; 5194 uint16_t cnt; 5195 struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring; 5196 5197 if (!qla_tgt_mode_enabled(vha)) 5198 return; 5199 5200 for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) { 5201 pkt->u.raw.signature = ATIO_PROCESSED; 5202 pkt++; 5203 } 5204 5205 } 5206 5207 /* 5208 * qlt_24xx_process_atio_queue() - Process ATIO queue entries. 
5209 * @ha: SCSI driver HA context 5210 */ 5211 void 5212 qlt_24xx_process_atio_queue(struct scsi_qla_host *vha) 5213 { 5214 struct qla_hw_data *ha = vha->hw; 5215 struct atio_from_isp *pkt; 5216 int cnt, i; 5217 5218 if (!vha->flags.online) 5219 return; 5220 5221 while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) { 5222 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; 5223 cnt = pkt->u.raw.entry_count; 5224 5225 qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt); 5226 5227 for (i = 0; i < cnt; i++) { 5228 ha->tgt.atio_ring_index++; 5229 if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) { 5230 ha->tgt.atio_ring_index = 0; 5231 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring; 5232 } else 5233 ha->tgt.atio_ring_ptr++; 5234 5235 pkt->u.raw.signature = ATIO_PROCESSED; 5236 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; 5237 } 5238 wmb(); 5239 } 5240 5241 /* Adjust ring index */ 5242 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index); 5243 } 5244 5245 void 5246 qlt_24xx_config_rings(struct scsi_qla_host *vha) 5247 { 5248 struct qla_hw_data *ha = vha->hw; 5249 if (!QLA_TGT_MODE_ENABLED()) 5250 return; 5251 5252 WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0); 5253 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0); 5254 RD_REG_DWORD(ISP_ATIO_Q_OUT(vha)); 5255 5256 if (IS_ATIO_MSIX_CAPABLE(ha)) { 5257 struct qla_msix_entry *msix = &ha->msix_entries[2]; 5258 struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb; 5259 5260 icb->msix_atio = cpu_to_le16(msix->entry); 5261 ql_dbg(ql_dbg_init, vha, 0xf072, 5262 "Registering ICB vector 0x%x for atio que.\n", 5263 msix->entry); 5264 } 5265 } 5266 5267 void 5268 qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) 5269 { 5270 struct qla_hw_data *ha = vha->hw; 5271 5272 if (qla_tgt_mode_enabled(vha)) { 5273 if (!ha->tgt.saved_set) { 5274 /* We save only once */ 5275 ha->tgt.saved_exchange_count = nv->exchange_count; 5276 ha->tgt.saved_firmware_options_1 = 5277 nv->firmware_options_1; 5278 ha->tgt.saved_firmware_options_2 = 5279 nv->firmware_options_2; 5280 ha->tgt.saved_firmware_options_3 = 5281 nv->firmware_options_3; 5282 ha->tgt.saved_set = 1; 5283 } 5284 5285 nv->exchange_count = __constant_cpu_to_le16(0xFFFF); 5286 5287 /* Enable target mode */ 5288 nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4); 5289 5290 /* Disable ini mode, if requested */ 5291 if (!qla_ini_mode_enabled(vha)) 5292 nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5); 5293 5294 /* Disable Full Login after LIP */ 5295 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13); 5296 /* Enable initial LIP */ 5297 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9); 5298 /* Enable FC tapes support */ 5299 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12); 5300 /* Disable Full Login after LIP */ 5301 nv->host_p &= __constant_cpu_to_le32(~BIT_10); 5302 /* Enable target PRLI control */ 5303 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14); 5304 } else { 5305 if (ha->tgt.saved_set) { 5306 nv->exchange_count = ha->tgt.saved_exchange_count; 5307 nv->firmware_options_1 = 5308 ha->tgt.saved_firmware_options_1; 5309 nv->firmware_options_2 = 5310 ha->tgt.saved_firmware_options_2; 5311 nv->firmware_options_3 = 5312 ha->tgt.saved_firmware_options_3; 5313 } 5314 return; 5315 } 5316 5317 /* out-of-order frames reassembly */ 5318 nv->firmware_options_3 |= BIT_6|BIT_9; 5319 5320 if (ha->tgt.enable_class_2) { 5321 if (vha->flags.init_done) 5322 fc_host_supported_classes(vha->host) = 5323 FC_COS_CLASS2 | FC_COS_CLASS3; 5324 5325 
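		/* Enable Class 2 service in the firmware options (BIT_8) */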
nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8); 5326 } else { 5327 if (vha->flags.init_done) 5328 fc_host_supported_classes(vha->host) = FC_COS_CLASS3; 5329 5330 nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8); 5331 } 5332 } 5333 5334 void 5335 qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha, 5336 struct init_cb_24xx *icb) 5337 { 5338 struct qla_hw_data *ha = vha->hw; 5339 5340 if (ha->tgt.node_name_set) { 5341 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); 5342 icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14); 5343 } 5344 } 5345 5346 void 5347 qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) 5348 { 5349 struct qla_hw_data *ha = vha->hw; 5350 5351 if (!QLA_TGT_MODE_ENABLED()) 5352 return; 5353 5354 if (qla_tgt_mode_enabled(vha)) { 5355 if (!ha->tgt.saved_set) { 5356 /* We save only once */ 5357 ha->tgt.saved_exchange_count = nv->exchange_count; 5358 ha->tgt.saved_firmware_options_1 = 5359 nv->firmware_options_1; 5360 ha->tgt.saved_firmware_options_2 = 5361 nv->firmware_options_2; 5362 ha->tgt.saved_firmware_options_3 = 5363 nv->firmware_options_3; 5364 ha->tgt.saved_set = 1; 5365 } 5366 5367 nv->exchange_count = __constant_cpu_to_le16(0xFFFF); 5368 5369 /* Enable target mode */ 5370 nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4); 5371 5372 /* Disable ini mode, if requested */ 5373 if (!qla_ini_mode_enabled(vha)) 5374 nv->firmware_options_1 |= 5375 __constant_cpu_to_le32(BIT_5); 5376 5377 /* Disable Full Login after LIP */ 5378 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13); 5379 /* Enable initial LIP */ 5380 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9); 5381 /* Enable FC tapes support */ 5382 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12); 5383 /* Disable Full Login after LIP */ 5384 nv->host_p &= __constant_cpu_to_le32(~BIT_10); 5385 /* Enable target PRLI control */ 5386 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14); 5387 } else { 5388 if (ha->tgt.saved_set) { 5389 nv->exchange_count = ha->tgt.saved_exchange_count; 5390 nv->firmware_options_1 = 5391 ha->tgt.saved_firmware_options_1; 5392 nv->firmware_options_2 = 5393 ha->tgt.saved_firmware_options_2; 5394 nv->firmware_options_3 = 5395 ha->tgt.saved_firmware_options_3; 5396 } 5397 return; 5398 } 5399 5400 /* out-of-order frames reassembly */ 5401 nv->firmware_options_3 |= BIT_6|BIT_9; 5402 5403 if (ha->tgt.enable_class_2) { 5404 if (vha->flags.init_done) 5405 fc_host_supported_classes(vha->host) = 5406 FC_COS_CLASS2 | FC_COS_CLASS3; 5407 5408 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8); 5409 } else { 5410 if (vha->flags.init_done) 5411 fc_host_supported_classes(vha->host) = FC_COS_CLASS3; 5412 5413 nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8); 5414 } 5415 } 5416 5417 void 5418 qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha, 5419 struct init_cb_81xx *icb) 5420 { 5421 struct qla_hw_data *ha = vha->hw; 5422 5423 if (!QLA_TGT_MODE_ENABLED()) 5424 return; 5425 5426 if (ha->tgt.node_name_set) { 5427 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); 5428 icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14); 5429 } 5430 } 5431 5432 void 5433 qlt_83xx_iospace_config(struct qla_hw_data *ha) 5434 { 5435 if (!QLA_TGT_MODE_ENABLED()) 5436 return; 5437 5438 ha->msix_count += 1; /* For ATIO Q */ 5439 } 5440 5441 int 5442 qlt_24xx_process_response_error(struct scsi_qla_host *vha, 5443 struct sts_entry_24xx *pkt) 5444 { 5445 switch (pkt->entry_type) { 5446 case ABTS_RECV_24XX: 5447 case 
ABTS_RESP_24XX: 5448 case CTIO_TYPE7: 5449 case NOTIFY_ACK_TYPE: 5450 case CTIO_CRC2: 5451 return 1; 5452 default: 5453 return 0; 5454 } 5455 } 5456 5457 void 5458 qlt_modify_vp_config(struct scsi_qla_host *vha, 5459 struct vp_config_entry_24xx *vpmod) 5460 { 5461 if (qla_tgt_mode_enabled(vha)) 5462 vpmod->options_idx1 &= ~BIT_5; 5463 /* Disable ini mode, if requested */ 5464 if (!qla_ini_mode_enabled(vha)) 5465 vpmod->options_idx1 &= ~BIT_4; 5466 } 5467 5468 void 5469 qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) 5470 { 5471 if (!QLA_TGT_MODE_ENABLED()) 5472 return; 5473 5474 if (ha->mqenable || IS_QLA83XX(ha)) { 5475 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in; 5476 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out; 5477 } else { 5478 ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in; 5479 ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out; 5480 } 5481 5482 mutex_init(&base_vha->vha_tgt.tgt_mutex); 5483 mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex); 5484 qlt_clear_mode(base_vha); 5485 } 5486 5487 irqreturn_t 5488 qla83xx_msix_atio_q(int irq, void *dev_id) 5489 { 5490 struct rsp_que *rsp; 5491 scsi_qla_host_t *vha; 5492 struct qla_hw_data *ha; 5493 unsigned long flags; 5494 5495 rsp = (struct rsp_que *) dev_id; 5496 ha = rsp->hw; 5497 vha = pci_get_drvdata(ha->pdev); 5498 5499 spin_lock_irqsave(&ha->hardware_lock, flags); 5500 5501 qlt_24xx_process_atio_queue(vha); 5502 qla24xx_process_response_queue(vha, rsp); 5503 5504 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5505 5506 return IRQ_HANDLED; 5507 } 5508 5509 int 5510 qlt_mem_alloc(struct qla_hw_data *ha) 5511 { 5512 if (!QLA_TGT_MODE_ENABLED()) 5513 return 0; 5514 5515 ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) * 5516 MAX_MULTI_ID_FABRIC, GFP_KERNEL); 5517 if (!ha->tgt.tgt_vp_map) 5518 return -ENOMEM; 5519 5520 ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev, 5521 (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp), 5522 &ha->tgt.atio_dma, GFP_KERNEL); 5523 if (!ha->tgt.atio_ring) { 5524 kfree(ha->tgt.tgt_vp_map); 5525 return -ENOMEM; 5526 } 5527 return 0; 5528 } 5529 5530 void 5531 qlt_mem_free(struct qla_hw_data *ha) 5532 { 5533 if (!QLA_TGT_MODE_ENABLED()) 5534 return; 5535 5536 if (ha->tgt.atio_ring) { 5537 dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) * 5538 sizeof(struct atio_from_isp), ha->tgt.atio_ring, 5539 ha->tgt.atio_dma); 5540 } 5541 kfree(ha->tgt.tgt_vp_map); 5542 } 5543 5544 /* vport_slock to be held by the caller */ 5545 void 5546 qlt_update_vp_map(struct scsi_qla_host *vha, int cmd) 5547 { 5548 if (!QLA_TGT_MODE_ENABLED()) 5549 return; 5550 5551 switch (cmd) { 5552 case SET_VP_IDX: 5553 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha; 5554 break; 5555 case SET_AL_PA: 5556 vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx; 5557 break; 5558 case RESET_VP_IDX: 5559 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL; 5560 break; 5561 case RESET_AL_PA: 5562 vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0; 5563 break; 5564 } 5565 } 5566 5567 static int __init qlt_parse_ini_mode(void) 5568 { 5569 if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0) 5570 ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE; 5571 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0) 5572 ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED; 5573 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0) 5574 ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED; 5575 else 5576 return false; 5577 5578 return 
true; 5579 } 5580 5581 int __init qlt_init(void) 5582 { 5583 int ret; 5584 5585 if (!qlt_parse_ini_mode()) { 5586 ql_log(ql_log_fatal, NULL, 0xe06b, 5587 "qlt_parse_ini_mode() failed\n"); 5588 return -EINVAL; 5589 } 5590 5591 if (!QLA_TGT_MODE_ENABLED()) 5592 return 0; 5593 5594 qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep", 5595 sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct 5596 qla_tgt_mgmt_cmd), 0, NULL); 5597 if (!qla_tgt_mgmt_cmd_cachep) { 5598 ql_log(ql_log_fatal, NULL, 0xe06d, 5599 "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n"); 5600 return -ENOMEM; 5601 } 5602 5603 qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab, 5604 mempool_free_slab, qla_tgt_mgmt_cmd_cachep); 5605 if (!qla_tgt_mgmt_cmd_mempool) { 5606 ql_log(ql_log_fatal, NULL, 0xe06e, 5607 "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n"); 5608 ret = -ENOMEM; 5609 goto out_mgmt_cmd_cachep; 5610 } 5611 5612 qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0); 5613 if (!qla_tgt_wq) { 5614 ql_log(ql_log_fatal, NULL, 0xe06f, 5615 "alloc_workqueue for qla_tgt_wq failed\n"); 5616 ret = -ENOMEM; 5617 goto out_cmd_mempool; 5618 } 5619 /* 5620 * Return 1 to signal that initiator-mode is being disabled 5621 */ 5622 return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0; 5623 5624 out_cmd_mempool: 5625 mempool_destroy(qla_tgt_mgmt_cmd_mempool); 5626 out_mgmt_cmd_cachep: 5627 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); 5628 return ret; 5629 } 5630 5631 void qlt_exit(void) 5632 { 5633 if (!QLA_TGT_MODE_ENABLED()) 5634 return; 5635 5636 destroy_workqueue(qla_tgt_wq); 5637 mempool_destroy(qla_tgt_mgmt_cmd_mempool); 5638 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); 5639 } 5640
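
/*
 * Illustrative sketch (not compiled): how the core qla2xxx module init
 * path might consume the tri-state return of qlt_init() documented above.
 * example_module_init() is hypothetical, not the actual qla_os.c code;
 * the matching module exit path would call qlt_exit().
 */
#if 0
static int __init example_module_init(void)
{
	int rc = qlt_init();

	if (rc < 0)		/* target-mode setup failed outright */
		return rc;
	if (rc > 0) {
		/*
		 * qlini_mode="disabled": the caller should skip its
		 * initiator-mode setup (e.g. rport scanning) for this
		 * load of the driver.
		 */
	}
	return 0;
}
#endif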