/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "qla_def.h"
#include "qla_target.h"

static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled when target mode is enabled, and re-enabled when "
	"target mode is disabled; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"enabled\" (default) - initiator mode will always stay enabled.");

int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};

/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE      0   /* simple task attribute */
#define FCP_PTA_HEADQ       1   /* head of queue task attribute */
#define FCP_PTA_ORDERED     2   /* ordered task attribute */
#define FCP_PTA_ACA         4   /* auto. contingent allegiance */
#define FCP_PTA_MASK        7   /* mask for task attribute field */
#define FCP_PRI_SHIFT       3   /* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK  0x80        /* reserved bits in priority field */

/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation, at the time when
 * those functions are called:
 *
 *   - Either the context is IRQ and only the IRQ handler can modify HW data,
 *     including rings related fields,
 *
 *   - Or access to target mode variables from struct qla_tgt doesn't
 *     cross those functions' boundaries, except tgt_stop, which is
 *     additionally protected by irq_cmd_count.
 */
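/*
 * Illustrative sketch (not from the original sources): per the note above,
 * a caller holding ha->hardware_lock must assume the lock can be dropped
 * and re-taken inside qla2x00_alloc_iocbs()/qla2x00_issue_marker(), e.g.:
 *
 *	spin_lock_irqsave(&ha->hardware_lock, flags);
 *	...
 *	qlt_issue_marker(vha, 1);	(may unlock/relock hardware_lock)
 *	(re-read any HW/ring state cached before the call)
 *	...
 *	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 */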
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt);
static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
	*cmd, struct atio_from_isp *atio, int ha_locked);
static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
	struct qla_tgt_srr_imm *imm, int ha_lock);

/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);

/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
static struct qla_tgt_sess *qlt_find_sess_by_port_name(
	struct qla_tgt *tgt,
	const uint8_t *port_name)
{
	struct qla_tgt_sess *sess;

	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
		if (!memcmp(sess->port_name, port_name, WWN_SIZE))
			return sess;
	}

	return NULL;
}

/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);
		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}

static inline
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
	uint8_t *d_id)
{
	struct qla_hw_data *ha = vha->hw;
	uint8_t vp_idx;

	if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
		return NULL;

	if (vha->d_id.b.al_pa == d_id[2])
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}

static inline
struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
	uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}
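/*
 * Note on addressing (added for clarity): the 24-bit FC port ID carried in
 * ATIO headers is laid out big-endian in this driver, i.e. id[0] = domain,
 * id[1] = area, id[2] = al_pa, which is the order the lookup helper above
 * compares against vha->d_id. The fcp_hdr_le variants used by the ABTS
 * paths below store those bytes in the opposite order, hence the explicit
 * byte swizzling in qlt_24xx_handle_abts().
 */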
void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	ql_dbg(ql_dbg_tgt, vha, 0xe072,
		"%s: qla_target(%d): type %x ox_id %04x\n",
		__func__, vha->vp_idx, atio->u.raw.entry_type,
		be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id[0],
			    atio->u.isp24.fcp_hdr.d_id[1],
			    atio->u.isp24.fcp_hdr.d_id[2]);
			break;
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != 0xFFFF)) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return;
}

void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_CRC2:
		ql_dbg(ql_dbg_tgt, vha, 0xe073,
			"qla_target(%d):%s: CRC2 Response pkt\n",
			vha->vp_idx, __func__);
		/* fall through - handled the same way as CTIO_TYPE7 */
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		if (0xFF != entry->u.isp24.vp_index) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RESP_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	default:
		qlt_response_pkt(vha, pkt);
		break;
	}

}
static void qlt_free_session_done(struct work_struct *work)
{
	struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;

	BUG_ON(!tgt);
	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
	    "Unregistration of sess %p finished\n", sess);

	kfree(sess);
	/*
	 * We need to protect against the race when tgt is freed before or
	 * inside wake_up().
	 */
	tgt->sess_count--;
	if (tgt->sess_count == 0)
		wake_up_all(&tgt->waitQ);
}

/* ha->hardware_lock supposed to be held on entry */
void qlt_unreg_sess(struct qla_tgt_sess *sess)
{
	struct scsi_qla_host *vha = sess->vha;

	vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	list_del(&sess->sess_list_entry);
	if (sess->deleted)
		list_del(&sess->del_list_entry);

	INIT_WORK(&sess->free_work, qlt_free_session_done);
	schedule_work(&sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);

/* ha->hardware_lock supposed to be held on entry */
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	uint32_t unpacked_lun, lun = 0;
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
#if 0 /* FIXME: Re-enable Global event handling.. */
		/* Global event */
		atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
		qlt_clear_tgt_db(ha->tgt.qla_tgt, 1);
		if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
			sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
			    typeof(*sess), sess_list_entry);
			switch (mcmd) {
			case QLA_TGT_NEXUS_LOSS_SESS:
				mcmd = QLA_TGT_NEXUS_LOSS;
				break;
			case QLA_TGT_ABORT_ALL_SESS:
				mcmd = QLA_TGT_ABORT_ALL;
				break;
			case QLA_TGT_NEXUS_LOSS:
			case QLA_TGT_ABORT_ALL:
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe046,
				    "qla_target(%d): Not allowed "
				    "command %x in %s", vha->vp_idx,
				    mcmd, __func__);
				sess = NULL;
				break;
			}
		} else
			sess = NULL;
#endif
	} else {
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
	    mcmd, loop_id);

	lun = a->u.isp24.fcp_cmnd.lun;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
	    iocb, QLA24XX_MGMT_SEND_NACK);
}
/* ha->hardware_lock supposed to be held on entry */
static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
	bool immediate)
{
	struct qla_tgt *tgt = sess->tgt;
	uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;

	if (sess->deleted)
		return;

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion\n", sess);
	list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
	sess->deleted = 1;

	if (immediate)
		dev_loss_tmo = 0;

	sess->expires = jiffies + dev_loss_tmo * HZ;

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
	    "qla_target(%d): session for port %8phC (loop ID %d) scheduled for "
	    "deletion in %u secs (expires: %lu) immed: %d\n",
	    sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo,
	    sess->expires, immediate);

	if (immediate)
		schedule_delayed_work(&tgt->sess_del_work, 0);
	else
		schedule_delayed_work(&tgt->sess_del_work,
		    sess->expires - jiffies);
}

/* ha->hardware_lock supposed to be held on entry */
static void qlt_clear_tgt_db(struct qla_tgt *tgt, bool local_only)
{
	struct qla_tgt_sess *sess;

	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
		qlt_schedule_sess_for_deletion(sess, true);

	/* At this point tgt could be already dead */
}

static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list;
	char *id_iter;
	int res, rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA Alloc failed of %u\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		res = -1;
		goto out_free_id_list;
	}

	id_iter = (char *)gid_list;
	res = -1;
	for (i = 0; i < entries; i++) {
		struct gid_list_info *gid = (struct gid_list_info *)id_iter;
		if ((gid->al_pa == s_id[2]) &&
		    (gid->area == s_id[1]) &&
		    (gid->domain == s_id[0])) {
			*loop_id = le16_to_cpu(gid->loop_id);
			res = 0;
			break;
		}
		id_iter += ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}

/* ha->hardware_lock supposed to be held on entry */
static void qlt_undelete_sess(struct qla_tgt_sess *sess)
{
	BUG_ON(!sess->deleted);

	list_del(&sess->del_list_entry);
	sess->deleted = 0;
}

static void qlt_del_sess_work_fn(struct delayed_work *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt,
	    sess_del_work);
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags, elapsed;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (!list_empty(&tgt->del_sess_list)) {
		sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
		    del_list_entry);
		elapsed = jiffies;
		if (time_after_eq(elapsed, sess->expires)) {
			qlt_undelete_sess(sess);

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
			    "Timeout: sess %p about to be deleted\n",
			    sess);
			ha->tgt.tgt_ops->shutdown_sess(sess);
			ha->tgt.tgt_ops->put_sess(sess);
		} else {
			schedule_delayed_work(&tgt->sess_del_work,
			    sess->expires - elapsed);
			break;
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
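/*
 * Worked example (illustrative, assuming port_down_retry_count = 30):
 * qlt_schedule_sess_for_deletion() computes dev_loss_tmo = 30 + 5 = 35,
 * so a non-immediate deletion sets sess->expires = jiffies + 35 * HZ, and
 * qlt_del_sess_work_fn() keeps re-arming itself until those 35 seconds
 * elapse, after which the session is shut down and its reference dropped.
 */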
/*
 * Adds an extra ref to allow to drop hw lock after adding sess to the list.
 * Caller must put it.
 */
static struct qla_tgt_sess *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags;
	unsigned char be_sid[3];

	/* Check to avoid double sessions */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list,
	    sess_list_entry) {
		if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
			    "Double sess %p found (s_id %x:%x:%x, "
			    "loop_id %d), updating to d_id %x:%x:%x, "
			    "loop_id %d", sess, sess->s_id.b.domain,
			    sess->s_id.b.al_pa, sess->s_id.b.area,
			    sess->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.al_pa, fcport->d_id.b.area,
			    fcport->loop_id);

			if (sess->deleted)
				qlt_undelete_sess(sess);

			kref_get(&sess->se_sess->sess_kref);
			ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
			    (fcport->flags & FCF_CONF_COMP_SUPPORTED));

			if (sess->local && !local)
				sess->local = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

			return sess;
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
		    "qla_target(%u): session allocation failed, all commands "
		    "from port %8phC will be refused", vha->vp_idx,
		    fcport->port_name);

		return NULL;
	}
	sess->tgt = vha->vha_tgt.qla_tgt;
	sess->vha = vha;
	sess->s_id = fcport->d_id;
	sess->loop_id = fcport->loop_id;
	sess->local = local;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
	    sess, vha->vha_tgt.qla_tgt);

	be_sid[0] = sess->s_id.b.domain;
	be_sid[1] = sess->s_id.b.area;
	be_sid[2] = sess->s_id.b.al_pa;
	/*
	 * Determine if this fc_port->port_name is allowed to access
	 * target mode using explicit NodeACLs+MappedLUNs, or using
	 * TPG demo mode. If this is successful a target mode FC nexus
	 * is created.
	 */
	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
		kfree(sess);
		return NULL;
	}
	/*
	 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
	 * access across ->hardware_lock reacquire.
	 */
	kref_get(&sess->se_sess->sess_kref);

	sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
	BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
	memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));

	spin_lock_irqsave(&ha->hardware_lock, flags);
	list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
	vha->vha_tgt.qla_tgt->sess_count++;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name,
	    fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area,
	    sess->s_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");

	return sess;
}
"" : "not "); 683 684 return sess; 685 } 686 687 /* 688 * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port() 689 */ 690 void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) 691 { 692 struct qla_hw_data *ha = vha->hw; 693 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 694 struct qla_tgt_sess *sess; 695 unsigned long flags; 696 697 if (!vha->hw->tgt.tgt_ops) 698 return; 699 700 if (!tgt || (fcport->port_type != FCT_INITIATOR)) 701 return; 702 703 if (qla_ini_mode_enabled(vha)) 704 return; 705 706 spin_lock_irqsave(&ha->hardware_lock, flags); 707 if (tgt->tgt_stop) { 708 spin_unlock_irqrestore(&ha->hardware_lock, flags); 709 return; 710 } 711 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); 712 if (!sess) { 713 spin_unlock_irqrestore(&ha->hardware_lock, flags); 714 715 mutex_lock(&vha->vha_tgt.tgt_mutex); 716 sess = qlt_create_sess(vha, fcport, false); 717 mutex_unlock(&vha->vha_tgt.tgt_mutex); 718 719 spin_lock_irqsave(&ha->hardware_lock, flags); 720 } else { 721 kref_get(&sess->se_sess->sess_kref); 722 723 if (sess->deleted) { 724 qlt_undelete_sess(sess); 725 726 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c, 727 "qla_target(%u): %ssession for port %8phC " 728 "(loop ID %d) reappeared\n", vha->vp_idx, 729 sess->local ? "local " : "", sess->port_name, 730 sess->loop_id); 731 732 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007, 733 "Reappeared sess %p\n", sess); 734 } 735 ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, 736 (fcport->flags & FCF_CONF_COMP_SUPPORTED)); 737 } 738 739 if (sess && sess->local) { 740 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d, 741 "qla_target(%u): local session for " 742 "port %8phC (loop ID %d) became global\n", vha->vp_idx, 743 fcport->port_name, sess->loop_id); 744 sess->local = 0; 745 } 746 ha->tgt.tgt_ops->put_sess(sess); 747 spin_unlock_irqrestore(&ha->hardware_lock, flags); 748 } 749 750 void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport) 751 { 752 struct qla_hw_data *ha = vha->hw; 753 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 754 struct qla_tgt_sess *sess; 755 unsigned long flags; 756 757 if (!vha->hw->tgt.tgt_ops) 758 return; 759 760 if (!tgt || (fcport->port_type != FCT_INITIATOR)) 761 return; 762 763 spin_lock_irqsave(&ha->hardware_lock, flags); 764 if (tgt->tgt_stop) { 765 spin_unlock_irqrestore(&ha->hardware_lock, flags); 766 return; 767 } 768 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); 769 if (!sess) { 770 spin_unlock_irqrestore(&ha->hardware_lock, flags); 771 return; 772 } 773 774 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess); 775 776 sess->local = 1; 777 qlt_schedule_sess_for_deletion(sess, false); 778 spin_unlock_irqrestore(&ha->hardware_lock, flags); 779 } 780 781 static inline int test_tgt_sess_count(struct qla_tgt *tgt) 782 { 783 struct qla_hw_data *ha = tgt->ha; 784 unsigned long flags; 785 int res; 786 /* 787 * We need to protect against race, when tgt is freed before or 788 * inside wake_up() 789 */ 790 spin_lock_irqsave(&ha->hardware_lock, flags); 791 ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002, 792 "tgt %p, empty(sess_list)=%d sess_count=%d\n", 793 tgt, list_empty(&tgt->sess_list), tgt->sess_count); 794 res = (tgt->sess_count == 0); 795 spin_unlock_irqrestore(&ha->hardware_lock, flags); 796 797 return res; 798 } 799 800 /* Called by tcm_qla2xxx configfs code */ 801 int qlt_stop_phase1(struct qla_tgt *tgt) 802 { 803 struct scsi_qla_host *vha = tgt->vha; 804 struct qla_hw_data *ha = tgt->ha; 805 unsigned long flags; 806 807 mutex_lock(&qla_tgt_mutex); 808 if 
/* Called by tcm_qla2xxx configfs code */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	mutex_lock(&qla_tgt_mutex);
	if (!vha->fc_vport) {
		struct Scsi_Host *sh = vha->host;
		struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
		bool npiv_vports;

		spin_lock_irqsave(sh->host_lock, flags);
		npiv_vports = (fc_host->npiv_vports_inuse);
		spin_unlock_irqrestore(sh->host_lock, flags);

		if (npiv_vports) {
			mutex_unlock(&qla_tgt_mutex);
			return -EPERM;
		}
	}
	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		mutex_unlock(&qla_tgt_mutex);
		return -EPERM;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt, true);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&qla_tgt_mutex);

	flush_delayed_work(&tgt->sess_del_work);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: list_empty(sess_list)=%d "
	    "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
	    tgt->sess_count);

	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));

	/* Big hammer */
	if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
	return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);

/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	unsigned long flags;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
	    "Waiting for %d IRQ commands to complete (tgt %p)",
	    tgt->irq_cmd_count, tgt);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (tgt->irq_cmd_count != 0) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		udelay(2);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished",
	    tgt);
}
EXPORT_SYMBOL(qlt_stop_phase2);

/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;

	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	vha->vha_tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}
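/*
 * Usage sketch (illustrative, not taken from tcm_qla2xxx): the target is
 * torn down in two phases, e.g.:
 *
 *	if (qlt_stop_phase1(vha->vha_tgt.qla_tgt) == 0)
 *		qlt_stop_phase2(vha->vha_tgt.qla_tgt);
 *
 * Phase 1 unregisters sessions and waits for them to drain; phase 2 waits
 * for in-flight IRQ commands (irq_cmd_count) and marks the tgt stopped,
 * after which qlt_release() may free it.
 */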
/* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;

	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_send_notify_ack(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (vha->vha_tgt.qla_tgt != NULL)
		vha->vha_tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			__constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	qla2x00_start_iocbs(vha, vha->req);
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x)\n",
	    ha, abts, status);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
	} else {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	qla2x00_start_iocbs(vha, vha->req);
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct abts_resp_from_24xx_fw *entry)
{
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	/*
	 * What we are handling here is the firmware's response to an ABTS
	 * response that we ourselves generated, so the ID fields in it are
	 * reversed.
	 */

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
	ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
	ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	ctio->u.status1.flags =
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_TERMINATE);
	ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;

	qla2x00_start_iocbs(vha, vha->req);

	qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
	    FCP_TMF_CMPL, true);
}

/* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_session *se_sess = sess->se_sess;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct se_cmd *se_cmd;
	u32 lun = 0;
	int rc;
	bool found_lun = false;

	spin_lock(&se_sess->sess_cmd_lock);
	list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
		struct qla_tgt_cmd *cmd =
			container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
		if (cmd->tag == abts->exchange_addr_to_abort) {
			lun = cmd->unpacked_lun;
			found_lun = true;
			break;
		}
	}
	spin_unlock(&se_sess->sess_cmd_lock);

	if (!found_lun)
		return -ENOENT;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
	    "qla_target(%d): task abort (tag=%d)\n",
	    vha->vp_idx, abts->exchange_addr_to_abort);

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
	    abts->exchange_addr_to_abort);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
		    "qla_target(%d): tgt_ops->handle_tmr()"
		    " failed: %d", vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	uint32_t tag = abts->exchange_addr_to_abort;
	uint8_t s_id[3];
	int rc;

	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
		    "qla_target(%d): ABTS: Abort Sequence not "
		    "supported\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
		    "qla_target(%d): ABTS: Unknown Exchange "
		    "Address received\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
	    abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
	    le32_to_cpu(abts->fcp_hdr_le.parameter));

	s_id[0] = abts->fcp_hdr_le.s_id[2];
	s_id[1] = abts->fcp_hdr_le.s_id[1];
	s_id[2] = abts->fcp_hdr_le.s_id[0];

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
		    "qla_target(%d): task abort for non-existent session\n",
		    vha->vp_idx);
		rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
		if (rc != 0) {
			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
			    false);
		}
		return;
	}

	rc = __qlt_24xx_handle_abts(vha, abts, sess);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
		    vha->vp_idx, rc);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, ha, 0xe008,
	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x)\n",
	    ha, atio, resp_code);

	/* Send marker if required */
	if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, ha, 0xe04c,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", ha->vp_idx, __func__);
		return;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->nport_handle = mcmd->sess->loop_id;
	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = ha->vp_idx;
	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_SEND_STATUS);
	ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.scsi_status =
	    __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
	ctio->u.status1.response_len = __constant_cpu_to_le16(8);
	ctio->u.status1.sense_data[0] = resp_code;

	qla2x00_start_iocbs(ha, ha->req);
}

void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);

/* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
	    "TM response mcmd (%p) status %#x state %#x",
	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
		qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
		    0, 0, 0, 0, 0, 0);
	else {
		if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
			qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
			    mcmd->fc_tm_rsp, false);
		else
			qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
			    mcmd->fc_tm_rsp);
	}
	/*
	 * Make the callback for ->free_mcmd() to queue_work() and invoke
	 * target_put_sess_cmd() to drop cmd_kref to 1. The final
	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
	 * qlt_xmit_tm_rsp() returns here..
	 */
	ha->tgt.tgt_ops->free_mcmd(mcmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);
/* No locks */
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd = prm->cmd;

	BUG_ON(cmd->sg_cnt == 0);

	prm->sg = (struct scatterlist *)cmd->sg;
	prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
	    cmd->sg_cnt, cmd->dma_data_direction);
	if (unlikely(prm->seg_cnt == 0))
		goto out_err;

	prm->cmd->sg_mapped = 1;

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
		/*
		 * If there are more than datasegs_per_cmd data segments,
		 * we need to allocate continuation entries for the rest.
		 */
		if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
			prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
			    prm->tgt->datasegs_per_cmd,
			    prm->tgt->datasegs_per_cont);
	} else {
		/* DIF */
		if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
		    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
			prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
			prm->tot_dsds = prm->seg_cnt;
		} else
			prm->tot_dsds = prm->seg_cnt;

		if (cmd->prot_sg_cnt) {
			prm->prot_sg = cmd->prot_sg;
			prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev,
			    cmd->prot_sg, cmd->prot_sg_cnt,
			    cmd->dma_data_direction);
			if (unlikely(prm->prot_seg_cnt == 0))
				goto out_err;

			if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
			    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
				/* DIF bundling is not supported here */
				prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
				    cmd->blk_sz);
				prm->tot_dsds += prm->prot_seg_cnt;
			} else
				prm->tot_dsds += prm->prot_seg_cnt;
		}
	}

	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
	    prm->seg_cnt, prm->req_cnt);
	return 0;

out_err:
	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
	    0, prm->cmd->sg_cnt);
	return -1;
}
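/*
 * Worked example (illustrative, with assumed values datasegs_per_cmd = 3
 * and datasegs_per_cont = 5): a command with seg_cnt = 12 needs
 * DIV_ROUND_UP(12 - 3, 5) = 2 continuation entries, so req_cnt grows from
 * 1 to 3 request-queue entries.
 */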
static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha = vha->hw;

	BUG_ON(!cmd->sg_mapped);
	pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
	cmd->sg_mapped = 0;

	if (cmd->prot_sg_cnt)
		pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
			cmd->dma_data_direction);

	if (cmd->ctx_dsd_alloced)
		qla2x00_clean_dsd_pool(ha, NULL, cmd);

	if (cmd->ctx)
		dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
}

static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
	uint32_t req_cnt)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ha->iobase;
	uint32_t cnt;

	if (vha->req->cnt < (req_cnt + 2)) {
		cnt = (uint16_t)RD_REG_DWORD(&reg->isp24.req_q_out);

		ql_dbg(ql_dbg_tgt, vha, 0xe00a,
		    "Request ring circled: cnt=%d, vha->req->ring_index=%d, "
		    "vha->req->cnt=%d, req_cnt=%d\n", cnt,
		    vha->req->ring_index, vha->req->cnt, req_cnt);
		if (vha->req->ring_index < cnt)
			vha->req->cnt = cnt - vha->req->ring_index;
		else
			vha->req->cnt = vha->req->length -
			    (vha->req->ring_index - cnt);
	}

	if (unlikely(vha->req->cnt < (req_cnt + 2))) {
		ql_dbg(ql_dbg_tgt, vha, 0xe00b,
		    "qla_target(%d): There is no room in the "
		    "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, "
		    "req_cnt=%d\n", vha->vp_idx, vha->req->ring_index,
		    vha->req->cnt, req_cnt);
		return -EAGAIN;
	}
	vha->req->cnt -= req_cnt;

	return 0;
}
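/*
 * Worked example (illustrative) for the free-slot math above: with a ring
 * of length 128, ring_index = 120 and hardware out pointer cnt = 10, the
 * producer is ahead of the consumer, so the free count is
 * length - (ring_index - cnt) = 128 - 110 = 18 entries; if instead
 * ring_index = 5 and cnt = 10, it is simply cnt - ring_index = 5.
 */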
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
{
	/* Adjust ring index. */
	vha->req->ring_index++;
	if (vha->req->ring_index == vha->req->length) {
		vha->req->ring_index = 0;
		vha->req->ring_ptr = vha->req->ring;
	} else {
		vha->req->ring_ptr++;
	}
	return (cont_entry_t *)vha->req->ring_ptr;
}

/* ha->hardware_lock supposed to be held on entry */
static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t h;

	h = ha->tgt.current_handle;
	/* always increment cmd handle */
	do {
		++h;
		if (h > DEFAULT_OUTSTANDING_COMMANDS)
			h = 1;	/* 0 is QLA_TGT_NULL_HANDLE */
		if (h == ha->tgt.current_handle) {
			ql_dbg(ql_dbg_tgt, vha, 0xe04e,
			    "qla_target(%d): Ran out of "
			    "empty cmd slots in ha %p\n", vha->vp_idx, ha);
			h = QLA_TGT_NULL_HANDLE;
			break;
		}
	} while ((h == QLA_TGT_NULL_HANDLE) ||
	    (h == QLA_TGT_SKIP_HANDLE) ||
	    (ha->tgt.cmds[h-1] != NULL));

	if (h != QLA_TGT_NULL_HANDLE)
		ha->tgt.current_handle = h;

	return h;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	uint32_t h;
	struct ctio7_to_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *atio = &prm->cmd->atio;

	pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	pkt->entry_type = CTIO_TYPE7;
	pkt->entry_count = (uint8_t)prm->req_cnt;
	pkt->vp_index = vha->vp_idx;

	h = qlt_make_handle(vha);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		ha->tgt.cmds[h-1] = prm->cmd;

	pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = prm->cmd->loop_id;
	pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr = atio->u.isp24.exchange_addr;
	pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
	pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

	ql_dbg(ql_dbg_tgt, vha, 0xe00c,
	    "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
	    vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT,
	    le16_to_cpu(pkt->u.status0.ox_id));
	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is a sufficient number of request entries to not drop it.
 */
static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;

	/* Build continuation packets */
	while (prm->seg_cnt > 0) {
		cont_a64_entry_t *cont_pkt64 =
			(cont_a64_entry_t *)qlt_get_req_pkt(vha);

		/*
		 * Make sure that none of cont_pkt64's 64-bit specific
		 * fields is used for 32-bit addressing. Cast to
		 * (cont_entry_t *) for that.
		 */

		memset(cont_pkt64, 0, sizeof(*cont_pkt64));

		cont_pkt64->entry_count = 1;
		cont_pkt64->sys_define = 0;

		if (enable_64bit_addressing) {
			cont_pkt64->entry_type = CONTINUE_A64_TYPE;
			dword_ptr =
			    (uint32_t *)&cont_pkt64->dseg_0_address;
		} else {
			cont_pkt64->entry_type = CONTINUE_TYPE;
			dword_ptr =
			    (uint32_t *)&((cont_entry_t *)
				cont_pkt64)->dseg_0_address;
		}

		/* Load continuation entry data segments */
		for (cnt = 0;
		    cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
		    cnt++, prm->seg_cnt--) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_lo32
				(sg_dma_address(prm->sg)));
			if (enable_64bit_addressing) {
				*dword_ptr++ =
				    cpu_to_le32(pci_dma_hi32
					(sg_dma_address
					(prm->sg)));
			}
			*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

			ql_dbg(ql_dbg_tgt, vha, 0xe00d,
			    "S/G Segment Cont. phys_addr=%llx:%llx, len=%d\n",
			    (long long unsigned int)
			    pci_dma_hi32(sg_dma_address(prm->sg)),
			    (long long unsigned int)
			    pci_dma_lo32(sg_dma_address(prm->sg)),
			    (int)sg_dma_len(prm->sg));

			prm->sg = sg_next(prm->sg);
		}
	}
}
/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is a sufficient number of request entries to not drop it.
 */
static void qlt_load_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;

	ql_dbg(ql_dbg_tgt, vha, 0xe00e,
	    "iocb->scsi_status=%x, iocb->flags=%x\n",
	    le16_to_cpu(pkt24->u.status0.scsi_status),
	    le16_to_cpu(pkt24->u.status0.flags));

	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);

	/* Setup packet address segment pointer */
	dword_ptr = pkt24->u.status0.dseg_0_address;

	/* Set total data segment count */
	if (prm->seg_cnt)
		pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);

	if (prm->seg_cnt == 0) {
		/* No data transfer */
		*dword_ptr++ = 0;
		*dword_ptr = 0;
		return;
	}

	/* If scatter gather */
	ql_dbg(ql_dbg_tgt, vha, 0xe00f, "%s", "Building S/G data segments...");

	/* Load command entry data segments */
	for (cnt = 0;
	    (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
	    cnt++, prm->seg_cnt--) {
		*dword_ptr++ =
		    cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
		if (enable_64bit_addressing) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_hi32(
				sg_dma_address(prm->sg)));
		}
		*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

		ql_dbg(ql_dbg_tgt, vha, 0xe010,
		    "S/G Segment phys_addr=%llx:%llx, len=%d\n",
		    (long long unsigned int)pci_dma_hi32(sg_dma_address(
		    prm->sg)),
		    (long long unsigned int)pci_dma_lo32(sg_dma_address(
		    prm->sg)),
		    (int)sg_dma_len(prm->sg));

		prm->sg = sg_next(prm->sg);
	}

	qlt_load_cont_data_segments(prm, vha);
}

static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
{
	return cmd->bufflen > 0;
}

/*
 * Called without ha->hardware_lock held
 */
static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
	struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
	uint32_t *full_req_cnt)
{
	struct qla_tgt *tgt = cmd->tgt;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd = &cmd->se_cmd;

	if (unlikely(cmd->aborted)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
		    "qla_target(%d): terminating exchange "
		    "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd,
		    se_cmd, cmd->tag);

		cmd->state = QLA_TGT_STATE_ABORTED;

		qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);

		/* !! At this point cmd could be already freed !! */
		return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
	}
	ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u ox_id %04x\n",
	    vha->vp_idx, cmd->tag,
	    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));

	prm->cmd = cmd;
	prm->tgt = tgt;
	prm->rq_result = scsi_status;
	prm->sense_buffer = &cmd->sense_buffer[0];
	prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
	prm->sg = NULL;
	prm->seg_cnt = -1;
	prm->req_cnt = 1;
	prm->add_status_pkt = 0;

	ql_dbg(ql_dbg_tgt, vha, 0xe012, "rq_result=%x, xmit_type=%x\n",
	    prm->rq_result, xmit_type);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
		return -EFAULT;

	ql_dbg(ql_dbg_tgt, vha, 0xe013, "CTIO start: vha(%d)\n", vha->vp_idx);

	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
		if (qlt_pci_map_calc_cnt(prm) != 0)
			return -EAGAIN;
	}

	*full_req_cnt = prm->req_cnt;

	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_tgt, vha, 0xe014,
		    "Residual underflow: %d (tag %d, "
		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_UNDER;
	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_tgt, vha, 0xe015,
		    "Residual overflow: %d (tag %d, "
		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_OVER;
	}

	if (xmit_type & QLA_TGT_XMIT_STATUS) {
		/*
		 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
		 * ignored in *xmit_response() below
		 */
		if (qlt_has_data(cmd)) {
			if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
			    (IS_FWI2_CAPABLE(ha) &&
			    (prm->rq_result != 0))) {
				prm->add_status_pkt = 1;
				(*full_req_cnt)++;
			}
		}
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe016,
	    "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n",
	    prm->req_cnt, *full_req_cnt, prm->add_status_pkt);

	return 0;
}

static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
	struct qla_tgt_cmd *cmd, int sending_sense)
{
	if (ha->tgt.enable_class_2)
		return 0;

	if (sending_sense)
		return cmd->conf_compl_supported;
	else
		return ha->tgt.enable_explicit_conf &&
		    cmd->conf_compl_supported;
}

#ifdef CONFIG_QLA_TGT_DEBUG_SRR
/*
 *  Original taken from the XFS code
 */
static unsigned long qlt_srr_random(void)
{
	static int Inited;
	static unsigned long RandomValue;
	static DEFINE_SPINLOCK(lock);
	/* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
	register long rv;
	register long lo;
	register long hi;
	unsigned long flags;

	spin_lock_irqsave(&lock, flags);
	if (!Inited) {
		RandomValue = jiffies;
		Inited = 1;
	}
	rv = RandomValue;
	hi = rv / 127773;
	lo = rv % 127773;
	rv = 16807 * lo - 2836 * hi;
	if (rv <= 0)
		rv += 2147483647;
	RandomValue = rv;
	spin_unlock_irqrestore(&lock, flags);
	return rv;
}
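/*
 * For reference (added note): the generator above is the Park-Miller
 * "minimal standard" Lehmer LCG (a = 16807, m = 2^31 - 1) evaluated via
 * Schrage's decomposition (127773 = m / a, 2836 = m % a), which is why it
 * cycles through all values in [1, 2^31 - 2].
 */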
lost, so it won't lead to SRR */
    if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
        == 50) {
        *xmit_type &= ~QLA_TGT_XMIT_STATUS;
        ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
            "Dropping cmd %p (tag %d) status", cmd, cmd->tag);
    }
#endif
    /*
     * It's currently not possible to simulate SRRs for FCP_WRITE without
     * a physical link layer failure, so don't even try here.
     */
    if (cmd->dma_data_direction != DMA_FROM_DEVICE)
        return;

    if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
        ((qlt_srr_random() % 100) == 20)) {
        int i, leave = 0;
        unsigned int tot_len = 0;

        while (leave == 0)
            leave = qlt_srr_random() % cmd->sg_cnt;

        for (i = 0; i < leave; i++)
            tot_len += cmd->sg[i].length;

        ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
            "Cutting cmd %p (tag %d) buffer"
            " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
            " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave,
            cmd->bufflen, cmd->sg_cnt);

        cmd->bufflen = tot_len;
        cmd->sg_cnt = leave;
    }

    if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
        unsigned int offset = qlt_srr_random() % cmd->bufflen;

        ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
            "Cutting cmd %p (tag %d) buffer head "
            "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset,
            cmd->bufflen);
        if (offset == 0)
            *xmit_type &= ~QLA_TGT_XMIT_DATA;
        else if (qlt_set_data_offset(cmd, offset)) {
            ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
                "qlt_set_data_offset() failed (tag %d)", cmd->tag);
        }
    }
}
#else
static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
{}
#endif

static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
    struct qla_tgt_prm *prm)
{
    prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
        (uint32_t)sizeof(ctio->u.status1.sense_data));
    ctio->u.status0.flags |=
        __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
    if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
        ctio->u.status0.flags |= __constant_cpu_to_le16(
            CTIO7_FLAGS_EXPLICIT_CONFORM |
            CTIO7_FLAGS_CONFORM_REQ);
    }
    ctio->u.status0.residual = cpu_to_le32(prm->residual);
    ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
    if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
        int i;

        if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
            if (prm->cmd->se_cmd.scsi_status != 0) {
                ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
                    "Skipping EXPLICIT_CONFORM and "
                    "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
                    "non GOOD status\n");
                goto skip_explicit_conf;
            }
            ctio->u.status1.flags |= __constant_cpu_to_le16(
                CTIO7_FLAGS_EXPLICIT_CONFORM |
                CTIO7_FLAGS_CONFORM_REQ);
        }
skip_explicit_conf:
        ctio->u.status1.flags &=
            ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
        ctio->u.status1.flags |=
            __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
        ctio->u.status1.scsi_status |=
            __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
        ctio->u.status1.sense_length =
            cpu_to_le16(prm->sense_buffer_len);
        for (i = 0; i < prm->sense_buffer_len/4; i++)
            ((uint32_t *)ctio->u.status1.sense_data)[i] =
                cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
#if 0
        if (unlikely((prm->sense_buffer_len % 4) != 0)) {
            static int q;
            if (q < 10) {
                ql_dbg(ql_dbg_tgt, vha, 0xe04f,
                    "qla_target(%d): %d bytes of sense "
                    "lost", prm->tgt->ha->vp_idx,
                    prm->sense_buffer_len % 4);
                q++;
            }
        }
#endif
    } else {
        ctio->u.status1.flags &=
            ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
        ctio->u.status1.flags |=
            __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
        ctio->u.status1.sense_length = 0;
        memset(ctio->u.status1.sense_data, 0,
            sizeof(ctio->u.status1.sense_data));
    }

    /* Sense with len > 24: is that even possible here? */
}



/* T10-DIF support */
static inline int
qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
{
    /*
     * Uncomment when corresponding SCSI changes are done.
     *
     if (!sp->cmd->prot_chk)
        return 0;
     *
     */
    switch (se_cmd->prot_op) {
    case TARGET_PROT_DOUT_INSERT:
    case TARGET_PROT_DIN_STRIP:
        if (ql2xenablehba_err_chk >= 1)
            return 1;
        break;
    case TARGET_PROT_DOUT_PASS:
    case TARGET_PROT_DIN_PASS:
        if (ql2xenablehba_err_chk >= 2)
            return 1;
        break;
    case TARGET_PROT_DIN_INSERT:
    case TARGET_PROT_DOUT_STRIP:
        return 1;
    default:
        break;
    }
    return 0;
}

/*
 * qlt_set_t10dif_tags - Extract Ref and App tags from the SCSI command
 */
static inline void
qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
{
    uint32_t lba = 0xffffffff & se_cmd->t_task_lba;

    /* Wait until Mode Sense/Select cmd, modepage Ah, subpage 2
     * has been implemented by TCM, before the AppTag is available.
     * Look for modesense_handlers[]
     */
    ctx->app_tag = 0;
    ctx->app_tag_mask[0] = 0x0;
    ctx->app_tag_mask[1] = 0x0;

    switch (se_cmd->prot_type) {
    case TARGET_DIF_TYPE0_PROT:
        /*
         * No check for ql2xenablehba_err_chk, as it would be an
         * I/O error if hba tag generation is not done.
         */
        ctx->ref_tag = cpu_to_le32(lba);

        if (!qlt_hba_err_chk_enabled(se_cmd))
            break;

        /* enable ALL bytes of the ref tag */
        ctx->ref_tag_mask[0] = 0xff;
        ctx->ref_tag_mask[1] = 0xff;
        ctx->ref_tag_mask[2] = 0xff;
        ctx->ref_tag_mask[3] = 0xff;
        break;
    /*
     * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
     * 16 bit app tag.
     */
    case TARGET_DIF_TYPE1_PROT:
        ctx->ref_tag = cpu_to_le32(lba);

        if (!qlt_hba_err_chk_enabled(se_cmd))
            break;

        /* enable ALL bytes of the ref tag */
        ctx->ref_tag_mask[0] = 0xff;
        ctx->ref_tag_mask[1] = 0xff;
        ctx->ref_tag_mask[2] = 0xff;
        ctx->ref_tag_mask[3] = 0xff;
        break;
    /*
     * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
     * match LBA in CDB + N
     */
    case TARGET_DIF_TYPE2_PROT:
        ctx->ref_tag = cpu_to_le32(lba);

        if (!qlt_hba_err_chk_enabled(se_cmd))
            break;

        /* enable ALL bytes of the ref tag */
        ctx->ref_tag_mask[0] = 0xff;
        ctx->ref_tag_mask[1] = 0xff;
        ctx->ref_tag_mask[2] = 0xff;
        ctx->ref_tag_mask[3] = 0xff;
        break;

    /* For Type 3 protection: 16 bit GUARD only */
    case TARGET_DIF_TYPE3_PROT:
        ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
            ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
        break;
    }
}


static inline int
qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
{
    uint32_t *cur_dsd;
    int sgc;
    uint32_t transfer_length = 0;
    uint32_t data_bytes;
    uint32_t dif_bytes;
    uint8_t bundling = 1;
    uint8_t *clr_ptr;
    struct crc_context *crc_ctx_pkt = NULL;
    struct qla_hw_data *ha;
    struct ctio_crc2_to_fw *pkt;
    dma_addr_t crc_ctx_dma;
    uint16_t fw_prot_opts = 0;
    struct qla_tgt_cmd *cmd = prm->cmd;
    struct se_cmd *se_cmd = &cmd->se_cmd;
    uint32_t h;
    struct atio_from_isp *atio = &prm->cmd->atio;
    uint16_t t16;

    sgc = 0;
    ha = vha->hw;

    pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr;
    prm->pkt = pkt;
    memset(pkt, 0, sizeof(*pkt));

    ql_dbg(ql_dbg_tgt, vha, 0xe071,
        "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
        vha->vp_idx, __func__, se_cmd, se_cmd->prot_op,
        prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);

    if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
        (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
        bundling = 0;

    /* Compute dif len and adjust data len to include protection */
    data_bytes = cmd->bufflen;
    dif_bytes = (data_bytes / cmd->blk_sz) * 8;

    switch (se_cmd->prot_op) {
    case TARGET_PROT_DIN_INSERT:
    case TARGET_PROT_DOUT_STRIP:
        transfer_length = data_bytes;
        data_bytes += dif_bytes;
        break;

    case TARGET_PROT_DIN_STRIP:
    case TARGET_PROT_DOUT_INSERT:
    case TARGET_PROT_DIN_PASS:
    case TARGET_PROT_DOUT_PASS:
        transfer_length = data_bytes + dif_bytes;
        break;

    default:
        BUG();
        break;
    }

    if (!qlt_hba_err_chk_enabled(se_cmd))
        fw_prot_opts |= 0x10; /* Disable Guard tag checking */
    /* HBA error checking enabled */
    else if (IS_PI_UNINIT_CAPABLE(ha)) {
        if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
            (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
            fw_prot_opts |= PO_DIS_VALD_APP_ESC;
        else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
            fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
    }

    switch (se_cmd->prot_op) {
    case TARGET_PROT_DIN_INSERT:
    case TARGET_PROT_DOUT_INSERT:
        fw_prot_opts |= PO_MODE_DIF_INSERT;
        break;
    case TARGET_PROT_DIN_STRIP:
    case TARGET_PROT_DOUT_STRIP:
        fw_prot_opts |= PO_MODE_DIF_REMOVE;
        break;
    case TARGET_PROT_DIN_PASS:
    case TARGET_PROT_DOUT_PASS:
        fw_prot_opts |= PO_MODE_DIF_PASS;
        /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
        break;
    default: /* Normal Request */
        fw_prot_opts |= PO_MODE_DIF_PASS;
        break;
    }


    /* ---- PKT ---- */
    /* Update entry type to indicate Command Type CRC_2 IOCB */
    pkt->entry_type = CTIO_CRC2;
    pkt->entry_count = 1;
    pkt->vp_index = vha->vp_idx;

    h = qlt_make_handle(vha);
    if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
        /*
         * CTIO type 7 from the firmware doesn't provide a way to
         * know the initiator's LOOP ID, hence we can't find
         * the session and thus the command.
         */
        return -EAGAIN;
    } else
        ha->tgt.cmds[h-1] = prm->cmd;


    pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
    pkt->nport_handle = prm->cmd->loop_id;
    pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
    pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
    pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
    pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
    pkt->exchange_addr = atio->u.isp24.exchange_addr;

    /* silence compile warning */
    t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
    pkt->ox_id = cpu_to_le16(t16);

    t16 = (atio->u.isp24.attr << 9);
    pkt->flags |= cpu_to_le16(t16);
    pkt->relative_offset = cpu_to_le32(prm->cmd->offset);

    /* Set transfer direction */
    if (cmd->dma_data_direction == DMA_TO_DEVICE)
        pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN);
    else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
        pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT);


    pkt->dseg_count = prm->tot_dsds;
    /* Fibre channel byte count */
    pkt->transfer_length = cpu_to_le32(transfer_length);


    /* ----- CRC context -------- */

    /* Allocate CRC context from global pool */
    crc_ctx_pkt = cmd->ctx =
        dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

    if (!crc_ctx_pkt)
        goto crc_queuing_error;
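
    /*
     * Note: a rough sketch of what the crc_context carries for the
     * firmware (existing fields of struct crc_context, nothing new):
     * the DIF ref/app tags and their byte masks, the block size and
     * guard seed, plus the DSD lists for the data and (when bundling)
     * protection buffers. It is allocated from ha->dl_dma_pool, so it
     * stays DMA-able until the command completes.
     */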
    /* Zero out CTX area. */
    clr_ptr = (uint8_t *)crc_ctx_pkt;
    memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

    crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
    INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

    /* Set handle */
    crc_ctx_pkt->handle = pkt->handle;

    qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt);

    pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
    pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
    pkt->crc_context_len = CRC_CONTEXT_LEN_FW;


    if (!bundling) {
        cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
    } else {
        /*
         * Configure bundling if we need to fetch interleaving
         * protection PCI accesses
         */
        fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
        crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
        crc_ctx_pkt->u.bundling.dseg_count =
            cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
        cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
    }

    /* Finish the common fields of CRC pkt */
    crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz);
    crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
    crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
    crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);


    /* Walks data segments */
    pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DSD_PTR);

    if (!bundling && prm->prot_seg_cnt) {
        if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
            prm->tot_dsds, cmd))
            goto crc_queuing_error;
    } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
        (prm->tot_dsds - prm->prot_seg_cnt), cmd))
        goto crc_queuing_error;

    if (bundling && prm->prot_seg_cnt) {
        /* Walks dif segments */
        pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;

        cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
        if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
            prm->prot_seg_cnt, cmd))
            goto crc_queuing_error;
    }
    return QLA_SUCCESS;

crc_queuing_error:
    /* Cleanup will be performed by the caller */

    return QLA_FUNCTION_FAILED;
}


/*
 * Callback to set up a response of xmit_type QLA_TGT_XMIT_DATA and
 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
 */
int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
    uint8_t scsi_status)
{
    struct scsi_qla_host *vha = cmd->vha;
    struct qla_hw_data *ha = vha->hw;
    struct ctio7_to_24xx *pkt;
    struct qla_tgt_prm prm;
    uint32_t full_req_cnt = 0;
    unsigned long flags = 0;
    int res;

    memset(&prm, 0, sizeof(prm));
    qlt_check_srr_debug(cmd, &xmit_type);

    ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
        "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n",
        (xmit_type & QLA_TGT_XMIT_STATUS) ?
        1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
        &cmd->se_cmd);

    res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
        &full_req_cnt);
    if (unlikely(res != 0)) {
        if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
            return 0;

        return res;
    }

    spin_lock_irqsave(&ha->hardware_lock, flags);

    /* Does the F/W have enough IOCBs for this request? */
    res = qlt_check_reserve_free_req(vha, full_req_cnt);
    if (unlikely(res))
        goto out_unmap_unlock;

    if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
        res = qlt_build_ctio_crc2_pkt(&prm, vha);
    else
        res = qlt_24xx_build_ctio_pkt(&prm, vha);
    if (unlikely(res != 0))
        goto out_unmap_unlock;


    pkt = (struct ctio7_to_24xx *)prm.pkt;

    if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
        pkt->u.status0.flags |=
            __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
                CTIO7_FLAGS_STATUS_MODE_0);

        if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
            qlt_load_data_segments(&prm, vha);

        if (prm.add_status_pkt == 0) {
            if (xmit_type & QLA_TGT_XMIT_STATUS) {
                pkt->u.status0.scsi_status =
                    cpu_to_le16(prm.rq_result);
                pkt->u.status0.residual =
                    cpu_to_le32(prm.residual);
                pkt->u.status0.flags |= __constant_cpu_to_le16(
                    CTIO7_FLAGS_SEND_STATUS);
                if (qlt_need_explicit_conf(ha, cmd, 0)) {
                    pkt->u.status0.flags |=
                        __constant_cpu_to_le16(
                            CTIO7_FLAGS_EXPLICIT_CONFORM |
                            CTIO7_FLAGS_CONFORM_REQ);
                }
            }

        } else {
            /*
             * We have already made sure that there is a sufficient
             * number of request entries to not drop the HW lock in
             * req_pkt().
             */
            struct ctio7_to_24xx *ctio =
                (struct ctio7_to_24xx *)qlt_get_req_pkt(vha);

            ql_dbg(ql_dbg_tgt, vha, 0xe019,
                "Building additional status packet\n");

            /*
             * T10Dif: ctio_crc2_to_fw overlay on top of
             * ctio7_to_24xx
             */
            memcpy(ctio, pkt, sizeof(*ctio));
            /* reset back to CTIO7 */
            ctio->entry_count = 1;
            ctio->entry_type = CTIO_TYPE7;
            ctio->dseg_count = 0;
            ctio->u.status1.flags &= ~__constant_cpu_to_le16(
                CTIO7_FLAGS_DATA_IN);

            /* Real finish is ctio_m1's finish */
            pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
            pkt->u.status0.flags |= __constant_cpu_to_le16(
                CTIO7_FLAGS_DONT_RET_CTIO);

            /*
             * qlt_24xx_init_ctio_to_isp will correct
             * all necessary fields that are part of CTIO7.
             * There should be no residual of CTIO-CRC2 data.
             */
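            /*
             * At this point the ring holds two IOCBs for this
             * command: the data CTIO (pkt, marked with
             * CTIO_INTERMEDIATE_HANDLE_MARK and
             * CTIO7_FLAGS_DONT_RET_CTIO above) and this extra
             * status-only CTIO7 (ctio); only the latter is
             * expected to be returned by the firmware.
             */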
2381 */ 2382 qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio, 2383 &prm); 2384 pr_debug("Status CTIO7: %p\n", ctio); 2385 } 2386 } else 2387 qlt_24xx_init_ctio_to_isp(pkt, &prm); 2388 2389 2390 cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */ 2391 2392 ql_dbg(ql_dbg_tgt, vha, 0xe01a, 2393 "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n", 2394 pkt, scsi_status); 2395 2396 qla2x00_start_iocbs(vha, vha->req); 2397 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2398 2399 return 0; 2400 2401 out_unmap_unlock: 2402 if (cmd->sg_mapped) 2403 qlt_unmap_sg(vha, cmd); 2404 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2405 2406 return res; 2407 } 2408 EXPORT_SYMBOL(qlt_xmit_response); 2409 2410 int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) 2411 { 2412 struct ctio7_to_24xx *pkt; 2413 struct scsi_qla_host *vha = cmd->vha; 2414 struct qla_hw_data *ha = vha->hw; 2415 struct qla_tgt *tgt = cmd->tgt; 2416 struct qla_tgt_prm prm; 2417 unsigned long flags; 2418 int res = 0; 2419 2420 memset(&prm, 0, sizeof(prm)); 2421 prm.cmd = cmd; 2422 prm.tgt = tgt; 2423 prm.sg = NULL; 2424 prm.req_cnt = 1; 2425 2426 /* Send marker if required */ 2427 if (qlt_issue_marker(vha, 0) != QLA_SUCCESS) 2428 return -EIO; 2429 2430 ql_dbg(ql_dbg_tgt, vha, 0xe01b, 2431 "%s: CTIO_start: vha(%d) se_cmd %p ox_id %04x\n", 2432 __func__, (int)vha->vp_idx, &cmd->se_cmd, 2433 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 2434 2435 /* Calculate number of entries and segments required */ 2436 if (qlt_pci_map_calc_cnt(&prm) != 0) 2437 return -EAGAIN; 2438 2439 spin_lock_irqsave(&ha->hardware_lock, flags); 2440 2441 /* Does F/W have an IOCBs for this request */ 2442 res = qlt_check_reserve_free_req(vha, prm.req_cnt); 2443 if (res != 0) 2444 goto out_unlock_free_unmap; 2445 if (cmd->se_cmd.prot_op) 2446 res = qlt_build_ctio_crc2_pkt(&prm, vha); 2447 else 2448 res = qlt_24xx_build_ctio_pkt(&prm, vha); 2449 2450 if (unlikely(res != 0)) 2451 goto out_unlock_free_unmap; 2452 pkt = (struct ctio7_to_24xx *)prm.pkt; 2453 pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT | 2454 CTIO7_FLAGS_STATUS_MODE_0); 2455 2456 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) 2457 qlt_load_data_segments(&prm, vha); 2458 2459 cmd->state = QLA_TGT_STATE_NEED_DATA; 2460 2461 qla2x00_start_iocbs(vha, vha->req); 2462 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2463 2464 return res; 2465 2466 out_unlock_free_unmap: 2467 if (cmd->sg_mapped) 2468 qlt_unmap_sg(vha, cmd); 2469 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2470 2471 return res; 2472 } 2473 EXPORT_SYMBOL(qlt_rdy_to_xfer); 2474 2475 2476 /* 2477 * Checks the guard or meta-data for the type of error 2478 * detected by the HBA. 
2479 */ 2480 static inline int 2481 qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, 2482 struct ctio_crc_from_fw *sts) 2483 { 2484 uint8_t *ap = &sts->actual_dif[0]; 2485 uint8_t *ep = &sts->expected_dif[0]; 2486 uint32_t e_ref_tag, a_ref_tag; 2487 uint16_t e_app_tag, a_app_tag; 2488 uint16_t e_guard, a_guard; 2489 uint64_t lba = cmd->se_cmd.t_task_lba; 2490 2491 a_guard = be16_to_cpu(*(uint16_t *)(ap + 0)); 2492 a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2)); 2493 a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4)); 2494 2495 e_guard = be16_to_cpu(*(uint16_t *)(ep + 0)); 2496 e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2)); 2497 e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4)); 2498 2499 ql_dbg(ql_dbg_tgt, vha, 0xe075, 2500 "iocb(s) %p Returned STATUS.\n", sts); 2501 2502 ql_dbg(ql_dbg_tgt, vha, 0xf075, 2503 "dif check TGT cdb 0x%x lba 0x%llu: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n", 2504 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, 2505 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard); 2506 2507 /* 2508 * Ignore sector if: 2509 * For type 3: ref & app tag is all 'f's 2510 * For type 0,1,2: app tag is all 'f's 2511 */ 2512 if ((a_app_tag == 0xffff) && 2513 ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) || 2514 (a_ref_tag == 0xffffffff))) { 2515 uint32_t blocks_done; 2516 2517 /* 2TB boundary case covered automatically with this */ 2518 blocks_done = e_ref_tag - (uint32_t)lba + 1; 2519 cmd->se_cmd.bad_sector = e_ref_tag; 2520 cmd->se_cmd.pi_err = 0; 2521 ql_dbg(ql_dbg_tgt, vha, 0xf074, 2522 "need to return scsi good\n"); 2523 2524 /* Update protection tag */ 2525 if (cmd->prot_sg_cnt) { 2526 uint32_t i, j = 0, k = 0, num_ent; 2527 struct scatterlist *sg, *sgl; 2528 2529 2530 sgl = cmd->prot_sg; 2531 2532 /* Patch the corresponding protection tags */ 2533 for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) { 2534 num_ent = sg_dma_len(sg) / 8; 2535 if (k + num_ent < blocks_done) { 2536 k += num_ent; 2537 continue; 2538 } 2539 j = blocks_done - k - 1; 2540 k = blocks_done; 2541 break; 2542 } 2543 2544 if (k != blocks_done) { 2545 ql_log(ql_log_warn, vha, 0xf076, 2546 "unexpected tag values tag:lba=%u:%llu)\n", 2547 e_ref_tag, (unsigned long long)lba); 2548 goto out; 2549 } 2550 2551 #if 0 2552 struct sd_dif_tuple *spt; 2553 /* TODO: 2554 * This section came from initiator. Is it valid here? 2555 * should ulp be override with actual val??? 
2556 */ 2557 spt = page_address(sg_page(sg)) + sg->offset; 2558 spt += j; 2559 2560 spt->app_tag = 0xffff; 2561 if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3) 2562 spt->ref_tag = 0xffffffff; 2563 #endif 2564 } 2565 2566 return 0; 2567 } 2568 2569 /* check guard */ 2570 if (e_guard != a_guard) { 2571 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; 2572 cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba; 2573 2574 ql_log(ql_log_warn, vha, 0xe076, 2575 "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", 2576 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, 2577 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, 2578 a_guard, e_guard, cmd); 2579 goto out; 2580 } 2581 2582 /* check ref tag */ 2583 if (e_ref_tag != a_ref_tag) { 2584 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; 2585 cmd->se_cmd.bad_sector = e_ref_tag; 2586 2587 ql_log(ql_log_warn, vha, 0xe077, 2588 "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", 2589 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, 2590 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, 2591 a_guard, e_guard, cmd); 2592 goto out; 2593 } 2594 2595 /* check appl tag */ 2596 if (e_app_tag != a_app_tag) { 2597 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; 2598 cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba; 2599 2600 ql_log(ql_log_warn, vha, 0xe078, 2601 "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", 2602 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, 2603 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, 2604 a_guard, e_guard, cmd); 2605 goto out; 2606 } 2607 out: 2608 return 1; 2609 } 2610 2611 2612 /* If hardware_lock held on entry, might drop it, then reaquire */ 2613 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ 2614 static int __qlt_send_term_exchange(struct scsi_qla_host *vha, 2615 struct qla_tgt_cmd *cmd, 2616 struct atio_from_isp *atio) 2617 { 2618 struct ctio7_to_24xx *ctio24; 2619 struct qla_hw_data *ha = vha->hw; 2620 request_t *pkt; 2621 int ret = 0; 2622 2623 ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha); 2624 2625 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); 2626 if (pkt == NULL) { 2627 ql_dbg(ql_dbg_tgt, vha, 0xe050, 2628 "qla_target(%d): %s failed: unable to allocate " 2629 "request packet\n", vha->vp_idx, __func__); 2630 return -ENOMEM; 2631 } 2632 2633 if (cmd != NULL) { 2634 if (cmd->state < QLA_TGT_STATE_PROCESSED) { 2635 ql_dbg(ql_dbg_tgt, vha, 0xe051, 2636 "qla_target(%d): Terminating cmd %p with " 2637 "incorrect state %d\n", vha->vp_idx, cmd, 2638 cmd->state); 2639 } else 2640 ret = 1; 2641 } 2642 2643 pkt->entry_count = 1; 2644 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 2645 2646 ctio24 = (struct ctio7_to_24xx *)pkt; 2647 ctio24->entry_type = CTIO_TYPE7; 2648 ctio24->nport_handle = cmd ? 
cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
    ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
    ctio24->vp_index = vha->vp_idx;
    ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
    ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
    ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
    ctio24->exchange_addr = atio->u.isp24.exchange_addr;
    ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
        __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
            CTIO7_FLAGS_TERMINATE);
    ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);

    /* Most likely, it isn't needed */
    ctio24->u.status1.residual = get_unaligned((uint32_t *)
        &atio->u.isp24.fcp_cmnd.add_cdb[
        atio->u.isp24.fcp_cmnd.add_cdb_len]);
    if (ctio24->u.status1.residual != 0)
        ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;

    qla2x00_start_iocbs(vha, vha->req);
    return ret;
}

static void qlt_send_term_exchange(struct scsi_qla_host *vha,
    struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
{
    unsigned long flags;
    int rc;

    if (qlt_issue_marker(vha, ha_locked) < 0)
        return;

    if (ha_locked) {
        rc = __qlt_send_term_exchange(vha, cmd, atio);
        goto done;
    }
    spin_lock_irqsave(&vha->hw->hardware_lock, flags);
    rc = __qlt_send_term_exchange(vha, cmd, atio);
    spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
done:
    /*
     * Terminate exchange will tell the fw to release any active CTIO
     * that's in FW possession and clean up the exchange.
     *
     * "cmd->state == QLA_TGT_STATE_ABORTED" means the CTIO is still
     * down at the FW. Free the cmd when the CTIO comes back later
     * with aborted (0x2) status.
     *
     * "cmd->state != QLA_TGT_STATE_ABORTED" means the CTIO is already
     * back with some error. Free the cmd now.
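     *
     * (rc == 1 is only returned by __qlt_send_term_exchange() when a cmd
     * was passed in and its state was already >= QLA_TGT_STATE_PROCESSED,
     * so unmapping and freeing it here should be safe.)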
2698 */ 2699 if ((rc == 1) && (cmd->state != QLA_TGT_STATE_ABORTED)) { 2700 if (!ha_locked && !in_interrupt()) 2701 msleep(250); /* just in case */ 2702 2703 if (cmd->sg_mapped) 2704 qlt_unmap_sg(vha, cmd); 2705 vha->hw->tgt.tgt_ops->free_cmd(cmd); 2706 } 2707 return; 2708 } 2709 2710 void qlt_free_cmd(struct qla_tgt_cmd *cmd) 2711 { 2712 struct qla_tgt_sess *sess = cmd->sess; 2713 2714 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074, 2715 "%s: se_cmd[%p] ox_id %04x\n", 2716 __func__, &cmd->se_cmd, 2717 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 2718 2719 BUG_ON(cmd->sg_mapped); 2720 if (unlikely(cmd->free_sg)) 2721 kfree(cmd->sg); 2722 2723 if (!sess || !sess->se_sess) { 2724 WARN_ON(1); 2725 return; 2726 } 2727 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); 2728 } 2729 EXPORT_SYMBOL(qlt_free_cmd); 2730 2731 /* ha->hardware_lock supposed to be held on entry */ 2732 static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha, 2733 struct qla_tgt_cmd *cmd, void *ctio) 2734 { 2735 struct qla_tgt_srr_ctio *sc; 2736 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 2737 struct qla_tgt_srr_imm *imm; 2738 2739 tgt->ctio_srr_id++; 2740 2741 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019, 2742 "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx); 2743 2744 if (!ctio) { 2745 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055, 2746 "qla_target(%d): SRR CTIO, but ctio is NULL\n", 2747 vha->vp_idx); 2748 return -EINVAL; 2749 } 2750 2751 sc = kzalloc(sizeof(*sc), GFP_ATOMIC); 2752 if (sc != NULL) { 2753 sc->cmd = cmd; 2754 /* IRQ is already OFF */ 2755 spin_lock(&tgt->srr_lock); 2756 sc->srr_id = tgt->ctio_srr_id; 2757 list_add_tail(&sc->srr_list_entry, 2758 &tgt->srr_ctio_list); 2759 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a, 2760 "CTIO SRR %p added (id %d)\n", sc, sc->srr_id); 2761 if (tgt->imm_srr_id == tgt->ctio_srr_id) { 2762 int found = 0; 2763 list_for_each_entry(imm, &tgt->srr_imm_list, 2764 srr_list_entry) { 2765 if (imm->srr_id == sc->srr_id) { 2766 found = 1; 2767 break; 2768 } 2769 } 2770 if (found) { 2771 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b, 2772 "Scheduling srr work\n"); 2773 schedule_work(&tgt->srr_work); 2774 } else { 2775 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056, 2776 "qla_target(%d): imm_srr_id " 2777 "== ctio_srr_id (%d), but there is no " 2778 "corresponding SRR IMM, deleting CTIO " 2779 "SRR %p\n", vha->vp_idx, 2780 tgt->ctio_srr_id, sc); 2781 list_del(&sc->srr_list_entry); 2782 spin_unlock(&tgt->srr_lock); 2783 2784 kfree(sc); 2785 return -EINVAL; 2786 } 2787 } 2788 spin_unlock(&tgt->srr_lock); 2789 } else { 2790 struct qla_tgt_srr_imm *ti; 2791 2792 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057, 2793 "qla_target(%d): Unable to allocate SRR CTIO entry\n", 2794 vha->vp_idx); 2795 spin_lock(&tgt->srr_lock); 2796 list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list, 2797 srr_list_entry) { 2798 if (imm->srr_id == tgt->ctio_srr_id) { 2799 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c, 2800 "IMM SRR %p deleted (id %d)\n", 2801 imm, imm->srr_id); 2802 list_del(&imm->srr_list_entry); 2803 qlt_reject_free_srr_imm(vha, imm, 1); 2804 } 2805 } 2806 spin_unlock(&tgt->srr_lock); 2807 2808 return -ENOMEM; 2809 } 2810 2811 return 0; 2812 } 2813 2814 /* 2815 * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reaquire 2816 */ 2817 static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio, 2818 struct qla_tgt_cmd *cmd, uint32_t status) 2819 { 2820 int term = 0; 2821 2822 if (ctio != NULL) { 2823 struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio; 2824 term = !(c->flags & 2825 __constant_cpu_to_le16(OF_TERM_EXCH)); 2826 } else 2827 term = 1; 2828 2829 if (term) 2830 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); 2831 2832 return term; 2833 } 2834 2835 /* ha->hardware_lock supposed to be held on entry */ 2836 static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha, 2837 uint32_t handle) 2838 { 2839 struct qla_hw_data *ha = vha->hw; 2840 2841 handle--; 2842 if (ha->tgt.cmds[handle] != NULL) { 2843 struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle]; 2844 ha->tgt.cmds[handle] = NULL; 2845 return cmd; 2846 } else 2847 return NULL; 2848 } 2849 2850 /* ha->hardware_lock supposed to be held on entry */ 2851 static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha, 2852 uint32_t handle, void *ctio) 2853 { 2854 struct qla_tgt_cmd *cmd = NULL; 2855 2856 /* Clear out internal marks */ 2857 handle &= ~(CTIO_COMPLETION_HANDLE_MARK | 2858 CTIO_INTERMEDIATE_HANDLE_MARK); 2859 2860 if (handle != QLA_TGT_NULL_HANDLE) { 2861 if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) { 2862 ql_dbg(ql_dbg_tgt, vha, 0xe01d, "%s", 2863 "SKIP_HANDLE CTIO\n"); 2864 return NULL; 2865 } 2866 /* handle-1 is actually used */ 2867 if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) { 2868 ql_dbg(ql_dbg_tgt, vha, 0xe052, 2869 "qla_target(%d): Wrong handle %x received\n", 2870 vha->vp_idx, handle); 2871 return NULL; 2872 } 2873 cmd = qlt_get_cmd(vha, handle); 2874 if (unlikely(cmd == NULL)) { 2875 ql_dbg(ql_dbg_tgt, vha, 0xe053, 2876 "qla_target(%d): Suspicious: unable to " 2877 "find the command with handle %x\n", vha->vp_idx, 2878 handle); 2879 return NULL; 2880 } 2881 } else if (ctio != NULL) { 2882 /* We can't get loop ID from CTIO7 */ 2883 ql_dbg(ql_dbg_tgt, vha, 0xe054, 2884 "qla_target(%d): Wrong CTIO received: QLA24xx doesn't " 2885 "support NULL handles\n", vha->vp_idx); 2886 return NULL; 2887 } 2888 2889 return cmd; 2890 } 2891 2892 /* 2893 * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reaquire 2894 */ 2895 static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, 2896 uint32_t status, void *ctio) 2897 { 2898 struct qla_hw_data *ha = vha->hw; 2899 struct se_cmd *se_cmd; 2900 struct target_core_fabric_ops *tfo; 2901 struct qla_tgt_cmd *cmd; 2902 2903 ql_dbg(ql_dbg_tgt, vha, 0xe01e, 2904 "qla_target(%d): handle(ctio %p status %#x) <- %08x\n", 2905 vha->vp_idx, ctio, status, handle); 2906 2907 if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) { 2908 /* That could happen only in case of an error/reset/abort */ 2909 if (status != CTIO_SUCCESS) { 2910 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d, 2911 "Intermediate CTIO received" 2912 " (status %x)\n", status); 2913 } 2914 return; 2915 } 2916 2917 cmd = qlt_ctio_to_cmd(vha, handle, ctio); 2918 if (cmd == NULL) 2919 return; 2920 2921 se_cmd = &cmd->se_cmd; 2922 tfo = se_cmd->se_tfo; 2923 2924 if (cmd->sg_mapped) 2925 qlt_unmap_sg(vha, cmd); 2926 2927 if (unlikely(status != CTIO_SUCCESS)) { 2928 switch (status & 0xFFFF) { 2929 case CTIO_LIP_RESET: 2930 case CTIO_TARGET_RESET: 2931 case CTIO_ABORTED: 2932 /* driver request abort via Terminate exchange */ 2933 case CTIO_TIMEOUT: 2934 case CTIO_INVALID_RX_ID: 2935 /* They are OK */ 2936 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058, 2937 "qla_target(%d): CTIO with " 2938 "status %#x received, state %x, se_cmd %p, " 2939 "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, " 2940 "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx, 2941 status, cmd->state, se_cmd); 2942 break; 2943 2944 case CTIO_PORT_LOGGED_OUT: 2945 case CTIO_PORT_UNAVAILABLE: 2946 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059, 2947 "qla_target(%d): CTIO with PORT LOGGED " 2948 "OUT (29) or PORT UNAVAILABLE (28) status %x " 2949 "received (state %x, se_cmd %p)\n", vha->vp_idx, 2950 status, cmd->state, se_cmd); 2951 break; 2952 2953 case CTIO_SRR_RECEIVED: 2954 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a, 2955 "qla_target(%d): CTIO with SRR_RECEIVED" 2956 " status %x received (state %x, se_cmd %p)\n", 2957 vha->vp_idx, status, cmd->state, se_cmd); 2958 if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0) 2959 break; 2960 else 2961 return; 2962 2963 case CTIO_DIF_ERROR: { 2964 struct ctio_crc_from_fw *crc = 2965 (struct ctio_crc_from_fw *)ctio; 2966 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073, 2967 "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n", 2968 vha->vp_idx, status, cmd->state, se_cmd, 2969 *((u64 *)&crc->actual_dif[0]), 2970 *((u64 *)&crc->expected_dif[0])); 2971 2972 if (qlt_handle_dif_error(vha, cmd, ctio)) { 2973 if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 2974 /* scsi Write/xfer rdy complete */ 2975 goto skip_term; 2976 } else { 2977 /* scsi read/xmit respond complete 2978 * call handle dif to send scsi status 2979 * rather than terminate exchange. 2980 */ 2981 cmd->state = QLA_TGT_STATE_PROCESSED; 2982 ha->tgt.tgt_ops->handle_dif_err(cmd); 2983 return; 2984 } 2985 } else { 2986 /* Need to generate a SCSI good completion. 2987 * because FW did not send scsi status. 2988 */ 2989 status = 0; 2990 goto skip_term; 2991 } 2992 break; 2993 } 2994 default: 2995 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, 2996 "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n", 2997 vha->vp_idx, status, cmd->state, se_cmd); 2998 break; 2999 } 3000 3001 3002 /* "cmd->state == QLA_TGT_STATE_ABORTED" means 3003 * cmd is already aborted/terminated, we don't 3004 * need to terminate again. The exchange is already 3005 * cleaned up/freed at FW level. 
Just cleanup at driver 3006 * level. 3007 */ 3008 if ((cmd->state != QLA_TGT_STATE_NEED_DATA) && 3009 (cmd->state != QLA_TGT_STATE_ABORTED)) { 3010 if (qlt_term_ctio_exchange(vha, ctio, cmd, status)) 3011 return; 3012 } 3013 } 3014 skip_term: 3015 3016 if (cmd->state == QLA_TGT_STATE_PROCESSED) { 3017 ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd); 3018 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 3019 int rx_status = 0; 3020 3021 cmd->state = QLA_TGT_STATE_DATA_IN; 3022 3023 if (unlikely(status != CTIO_SUCCESS)) 3024 rx_status = -EIO; 3025 else 3026 cmd->write_data_transferred = 1; 3027 3028 ql_dbg(ql_dbg_tgt, vha, 0xe020, 3029 "Data received, context %x, rx_status %d\n", 3030 0x0, rx_status); 3031 3032 ha->tgt.tgt_ops->handle_data(cmd); 3033 return; 3034 } else if (cmd->state == QLA_TGT_STATE_ABORTED) { 3035 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, 3036 "Aborted command %p (tag %d) finished\n", cmd, cmd->tag); 3037 } else { 3038 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c, 3039 "qla_target(%d): A command in state (%d) should " 3040 "not return a CTIO complete\n", vha->vp_idx, cmd->state); 3041 } 3042 3043 if (unlikely(status != CTIO_SUCCESS) && 3044 (cmd->state != QLA_TGT_STATE_ABORTED)) { 3045 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n"); 3046 dump_stack(); 3047 } 3048 3049 ha->tgt.tgt_ops->free_cmd(cmd); 3050 } 3051 3052 static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha, 3053 uint8_t task_codes) 3054 { 3055 int fcp_task_attr; 3056 3057 switch (task_codes) { 3058 case ATIO_SIMPLE_QUEUE: 3059 fcp_task_attr = MSG_SIMPLE_TAG; 3060 break; 3061 case ATIO_HEAD_OF_QUEUE: 3062 fcp_task_attr = MSG_HEAD_TAG; 3063 break; 3064 case ATIO_ORDERED_QUEUE: 3065 fcp_task_attr = MSG_ORDERED_TAG; 3066 break; 3067 case ATIO_ACA_QUEUE: 3068 fcp_task_attr = MSG_ACA_TAG; 3069 break; 3070 case ATIO_UNTAGGED: 3071 fcp_task_attr = MSG_SIMPLE_TAG; 3072 break; 3073 default: 3074 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d, 3075 "qla_target: unknown task code %x, use ORDERED instead\n", 3076 task_codes); 3077 fcp_task_attr = MSG_ORDERED_TAG; 3078 break; 3079 } 3080 3081 return fcp_task_attr; 3082 } 3083 3084 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *, 3085 uint8_t *); 3086 /* 3087 * Process context for I/O path into tcm_qla2xxx code 3088 */ 3089 static void __qlt_do_work(struct qla_tgt_cmd *cmd) 3090 { 3091 scsi_qla_host_t *vha = cmd->vha; 3092 struct qla_hw_data *ha = vha->hw; 3093 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 3094 struct qla_tgt_sess *sess = cmd->sess; 3095 struct atio_from_isp *atio = &cmd->atio; 3096 unsigned char *cdb; 3097 unsigned long flags; 3098 uint32_t data_length; 3099 int ret, fcp_task_attr, data_dir, bidi = 0; 3100 3101 if (tgt->tgt_stop) 3102 goto out_term; 3103 3104 cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; 3105 cmd->tag = atio->u.isp24.exchange_addr; 3106 cmd->unpacked_lun = scsilun_to_int( 3107 (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun); 3108 3109 if (atio->u.isp24.fcp_cmnd.rddata && 3110 atio->u.isp24.fcp_cmnd.wrdata) { 3111 bidi = 1; 3112 data_dir = DMA_TO_DEVICE; 3113 } else if (atio->u.isp24.fcp_cmnd.rddata) 3114 data_dir = DMA_FROM_DEVICE; 3115 else if (atio->u.isp24.fcp_cmnd.wrdata) 3116 data_dir = DMA_TO_DEVICE; 3117 else 3118 data_dir = DMA_NONE; 3119 3120 fcp_task_attr = qlt_get_fcp_task_attr(vha, 3121 atio->u.isp24.fcp_cmnd.task_attr); 3122 data_length = be32_to_cpu(get_unaligned((uint32_t *) 3123 &atio->u.isp24.fcp_cmnd.add_cdb[ 3124 atio->u.isp24.fcp_cmnd.add_cdb_len])); 3125 3126 ql_dbg(ql_dbg_tgt, vha, 
0xe022,
        "qla_target: START qla cmd: %p se_cmd %p lun: 0x%04x (tag %d) len(%d) ox_id %x\n",
        cmd, &cmd->se_cmd, cmd->unpacked_lun, cmd->tag, data_length,
        cmd->atio.u.isp24.fcp_hdr.ox_id);

    ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
        fcp_task_attr, data_dir, bidi);
    if (ret != 0)
        goto out_term;
    /*
     * Drop the extra session reference taken in qlt_handle_cmd_for_atio().
     */
    spin_lock_irqsave(&ha->hardware_lock, flags);
    ha->tgt.tgt_ops->put_sess(sess);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);
    return;

out_term:
    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd);
    /*
     * cmd has not been sent to the target yet, so pass NULL as the second
     * argument to qlt_send_term_exchange() and free the memory here.
     */
    spin_lock_irqsave(&ha->hardware_lock, flags);
    qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
    percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
    ha->tgt.tgt_ops->put_sess(sess);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void qlt_do_work(struct work_struct *work)
{
    struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);

    __qlt_do_work(cmd);
}

static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
    struct qla_tgt_sess *sess,
    struct atio_from_isp *atio)
{
    struct se_session *se_sess = sess->se_sess;
    struct qla_tgt_cmd *cmd;
    int tag;

    tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
    if (tag < 0)
        return NULL;

    cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
    memset(cmd, 0, sizeof(struct qla_tgt_cmd));

    memcpy(&cmd->atio, atio, sizeof(*atio));
    cmd->state = QLA_TGT_STATE_NEW;
    cmd->tgt = vha->vha_tgt.qla_tgt;
    cmd->vha = vha;
    cmd->se_cmd.map_tag = tag;
    cmd->sess = sess;
    cmd->loop_id = sess->loop_id;
    cmd->conf_compl_supported = sess->conf_compl_supported;

    return cmd;
}

static void qlt_send_busy(struct scsi_qla_host *, struct atio_from_isp *,
    uint16_t);

static void qlt_create_sess_from_atio(struct work_struct *work)
{
    struct qla_tgt_sess_op *op = container_of(work,
        struct qla_tgt_sess_op, work);
    scsi_qla_host_t *vha = op->vha;
    struct qla_hw_data *ha = vha->hw;
    struct qla_tgt_sess *sess;
    struct qla_tgt_cmd *cmd;
    unsigned long flags;
    uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;

    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
        "qla_target(%d): Unable to find wwn login"
        " (s_id %x:%x:%x), trying to create it manually\n",
        vha->vp_idx, s_id[0], s_id[1], s_id[2]);

    if (op->atio.u.raw.entry_count > 1) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
            "Dropping multi entry atio %p\n", &op->atio);
        goto out_term;
    }

    mutex_lock(&vha->vha_tgt.tgt_mutex);
    sess = qlt_make_local_sess(vha, s_id);
    /* sess has an extra creation ref. */
    mutex_unlock(&vha->vha_tgt.tgt_mutex);

    if (!sess)
        goto out_term;
    /*
     * Now obtain a pre-allocated session tag using the original op->atio
     * packet header, and dispatch into __qlt_do_work() using the existing
     * process context.
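     *
     * (qlt_create_sess_from_atio() already runs in process context on
     * qla_tgt_wq, so the command can be dispatched with a direct
     * __qlt_do_work() call instead of queueing cmd->work again.)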
3226 */ 3227 cmd = qlt_get_tag(vha, sess, &op->atio); 3228 if (!cmd) { 3229 spin_lock_irqsave(&ha->hardware_lock, flags); 3230 qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY); 3231 ha->tgt.tgt_ops->put_sess(sess); 3232 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3233 kfree(op); 3234 return; 3235 } 3236 /* 3237 * __qlt_do_work() will call ha->tgt.tgt_ops->put_sess() to release 3238 * the extra reference taken above by qlt_make_local_sess() 3239 */ 3240 __qlt_do_work(cmd); 3241 kfree(op); 3242 return; 3243 3244 out_term: 3245 spin_lock_irqsave(&ha->hardware_lock, flags); 3246 qlt_send_term_exchange(vha, NULL, &op->atio, 1); 3247 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3248 kfree(op); 3249 3250 } 3251 3252 /* ha->hardware_lock supposed to be held on entry */ 3253 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, 3254 struct atio_from_isp *atio) 3255 { 3256 struct qla_hw_data *ha = vha->hw; 3257 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 3258 struct qla_tgt_sess *sess; 3259 struct qla_tgt_cmd *cmd; 3260 3261 if (unlikely(tgt->tgt_stop)) { 3262 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021, 3263 "New command while device %p is shutting down\n", tgt); 3264 return -EFAULT; 3265 } 3266 3267 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id); 3268 if (unlikely(!sess)) { 3269 struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op), 3270 GFP_ATOMIC); 3271 if (!op) 3272 return -ENOMEM; 3273 3274 memcpy(&op->atio, atio, sizeof(*atio)); 3275 INIT_WORK(&op->work, qlt_create_sess_from_atio); 3276 queue_work(qla_tgt_wq, &op->work); 3277 return 0; 3278 } 3279 /* 3280 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock. 3281 */ 3282 kref_get(&sess->se_sess->sess_kref); 3283 3284 cmd = qlt_get_tag(vha, sess, atio); 3285 if (!cmd) { 3286 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e, 3287 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx); 3288 ha->tgt.tgt_ops->put_sess(sess); 3289 return -ENOMEM; 3290 } 3291 3292 INIT_WORK(&cmd->work, qlt_do_work); 3293 queue_work(qla_tgt_wq, &cmd->work); 3294 return 0; 3295 3296 } 3297 3298 /* ha->hardware_lock supposed to be held on entry */ 3299 static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, 3300 int fn, void *iocb, int flags) 3301 { 3302 struct scsi_qla_host *vha = sess->vha; 3303 struct qla_hw_data *ha = vha->hw; 3304 struct qla_tgt_mgmt_cmd *mcmd; 3305 int res; 3306 uint8_t tmr_func; 3307 3308 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); 3309 if (!mcmd) { 3310 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009, 3311 "qla_target(%d): Allocation of management " 3312 "command failed, some commands and their data could " 3313 "leak\n", vha->vp_idx); 3314 return -ENOMEM; 3315 } 3316 memset(mcmd, 0, sizeof(*mcmd)); 3317 mcmd->sess = sess; 3318 3319 if (iocb) { 3320 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb, 3321 sizeof(mcmd->orig_iocb.imm_ntfy)); 3322 } 3323 mcmd->tmr_func = fn; 3324 mcmd->flags = flags; 3325 3326 switch (fn) { 3327 case QLA_TGT_CLEAR_ACA: 3328 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000, 3329 "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx); 3330 tmr_func = TMR_CLEAR_ACA; 3331 break; 3332 3333 case QLA_TGT_TARGET_RESET: 3334 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001, 3335 "qla_target(%d): TARGET_RESET received\n", 3336 sess->vha->vp_idx); 3337 tmr_func = TMR_TARGET_WARM_RESET; 3338 break; 3339 3340 case QLA_TGT_LUN_RESET: 3341 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002, 3342 "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx); 3343 tmr_func = TMR_LUN_RESET; 3344 break; 
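    /*
     * The cases below continue the same 1:1 mapping from the FCP task
     * management flags to TCM's TMR_* codes, e.g.
     * QLA_TGT_CLEAR_TS -> TMR_CLEAR_TASK_SET and
     * QLA_TGT_ABORT_TS -> TMR_ABORT_TASK_SET.
     */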
    case QLA_TGT_CLEAR_TS:
        ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003,
            "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx);
        tmr_func = TMR_CLEAR_TASK_SET;
        break;

    case QLA_TGT_ABORT_TS:
        ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004,
            "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx);
        tmr_func = TMR_ABORT_TASK_SET;
        break;
#if 0
    case QLA_TGT_ABORT_ALL:
        ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005,
            "qla_target(%d): Doing ABORT_ALL_TASKS\n",
            sess->vha->vp_idx);
        tmr_func = 0;
        break;

    case QLA_TGT_ABORT_ALL_SESS:
        ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006,
            "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n",
            sess->vha->vp_idx);
        tmr_func = 0;
        break;

    case QLA_TGT_NEXUS_LOSS_SESS:
        ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007,
            "qla_target(%d): Doing NEXUS_LOSS_SESS\n",
            sess->vha->vp_idx);
        tmr_func = 0;
        break;

    case QLA_TGT_NEXUS_LOSS:
        ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008,
            "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx);
        tmr_func = 0;
        break;
#endif
    default:
        ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a,
            "qla_target(%d): Unknown task mgmt fn 0x%x\n",
            sess->vha->vp_idx, fn);
        mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
        return -ENOSYS;
    }

    res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0);
    if (res != 0) {
        ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
            "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
            sess->vha->vp_idx, res);
        mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
        return -EFAULT;
    }

    return 0;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
{
    struct atio_from_isp *a = (struct atio_from_isp *)iocb;
    struct qla_hw_data *ha = vha->hw;
    struct qla_tgt *tgt;
    struct qla_tgt_sess *sess;
    uint32_t lun, unpacked_lun;
    int lun_size, fn;

    tgt = vha->vha_tgt.qla_tgt;

    lun = a->u.isp24.fcp_cmnd.lun;
    lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
    fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
    sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
        a->u.isp24.fcp_hdr.s_id);
    unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

    if (!sess) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
            "qla_target(%d): task mgmt fn 0x%x for "
            "non-existent session\n", vha->vp_idx, fn);
        return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
            sizeof(struct atio_from_isp));
    }

    return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
}

/* ha->hardware_lock supposed to be held on entry */
static int __qlt_abort_task(struct scsi_qla_host *vha,
    struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess)
{
    struct atio_from_isp *a = (struct atio_from_isp *)iocb;
    struct qla_hw_data *ha = vha->hw;
    struct qla_tgt_mgmt_cmd *mcmd;
    uint32_t lun, unpacked_lun;
    int rc;

    mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
    if (mcmd == NULL) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
            "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
            vha->vp_idx, __func__);
        return -ENOMEM;
    }
    memset(mcmd, 0, sizeof(*mcmd));

    mcmd->sess = sess;
    memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
        sizeof(mcmd->orig_iocb.imm_ntfy));

    lun = a->u.isp24.fcp_cmnd.lun;
    unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

    rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
        le16_to_cpu(iocb->u.isp2x.seq_id));
    if (rc != 0) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
            "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
            vha->vp_idx, rc);
        mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
        return -EFAULT;
    }

    return 0;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_abort_task(struct scsi_qla_host *vha,
    struct imm_ntfy_from_isp *iocb)
{
    struct qla_hw_data *ha = vha->hw;
    struct qla_tgt_sess *sess;
    int loop_id;

    loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);

    sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
    if (sess == NULL) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
            "qla_target(%d): task abort for non-existent "
            "session\n", vha->vp_idx);
        return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
            QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
    }

    return __qlt_abort_task(vha, iocb, sess);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
    struct imm_ntfy_from_isp *iocb)
{
    int res = 0;

    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
        "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
        vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);

    switch (iocb->u.isp24.status_subcode) {
    case ELS_PLOGI:
    case ELS_FLOGI:
    case ELS_PRLI:
    case ELS_LOGO:
    case ELS_PRLO:
        res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
        break;
    case ELS_PDISC:
    case ELS_ADISC:
    {
        struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
        if (tgt->link_reinit_iocb_pending) {
            qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
                0, 0, 0, 0, 0, 0);
            tgt->link_reinit_iocb_pending = 0;
        }
        res = 1; /* send notify ack */
        break;
    }

    default:
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
            "qla_target(%d): Unsupported ELS command %x "
            "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
        res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
        break;
    }

    return res;
}

static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
{
    struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
    size_t first_offset = 0, rem_offset = offset, tmp = 0;
    int i, sg_srr_cnt, bufflen = 0;

    ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023,
        "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, "
        "cmd->sg_cnt: %u, direction: %d\n",
        cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);

    /*
     * FIXME: Reject non zero SRR relative offset until we can test
     * this code properly.
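     *
     * (When enabled, the code below would honour an SRR that asks for a
     * replay from a byte offset inside the buffer: it walks cmd->sg to
     * the scatterlist entry containing that offset and rebuilds a
     * trimmed table starting there.)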
3554 */ 3555 pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset); 3556 return -1; 3557 3558 if (!cmd->sg || !cmd->sg_cnt) { 3559 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055, 3560 "Missing cmd->sg or zero cmd->sg_cnt in" 3561 " qla_tgt_set_data_offset\n"); 3562 return -EINVAL; 3563 } 3564 /* 3565 * Walk the current cmd->sg list until we locate the new sg_srr_start 3566 */ 3567 for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) { 3568 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024, 3569 "sg[%d]: %p page: %p, length: %d, offset: %d\n", 3570 i, sg, sg_page(sg), sg->length, sg->offset); 3571 3572 if ((sg->length + tmp) > offset) { 3573 first_offset = rem_offset; 3574 sg_srr_start = sg; 3575 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025, 3576 "Found matching sg[%d], using %p as sg_srr_start, " 3577 "and using first_offset: %zu\n", i, sg, 3578 first_offset); 3579 break; 3580 } 3581 tmp += sg->length; 3582 rem_offset -= sg->length; 3583 } 3584 3585 if (!sg_srr_start) { 3586 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056, 3587 "Unable to locate sg_srr_start for offset: %u\n", offset); 3588 return -EINVAL; 3589 } 3590 sg_srr_cnt = (cmd->sg_cnt - i); 3591 3592 sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL); 3593 if (!sg_srr) { 3594 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057, 3595 "Unable to allocate sgp\n"); 3596 return -ENOMEM; 3597 } 3598 sg_init_table(sg_srr, sg_srr_cnt); 3599 sgp = &sg_srr[0]; 3600 /* 3601 * Walk the remaining list for sg_srr_start, mapping to the newly 3602 * allocated sg_srr taking first_offset into account. 3603 */ 3604 for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) { 3605 if (first_offset) { 3606 sg_set_page(sgp, sg_page(sg), 3607 (sg->length - first_offset), first_offset); 3608 first_offset = 0; 3609 } else { 3610 sg_set_page(sgp, sg_page(sg), sg->length, 0); 3611 } 3612 bufflen += sgp->length; 3613 3614 sgp = sg_next(sgp); 3615 if (!sgp) 3616 break; 3617 } 3618 3619 cmd->sg = sg_srr; 3620 cmd->sg_cnt = sg_srr_cnt; 3621 cmd->bufflen = bufflen; 3622 cmd->offset += offset; 3623 cmd->free_sg = 1; 3624 3625 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg); 3626 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n", 3627 cmd->sg_cnt); 3628 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n", 3629 cmd->bufflen); 3630 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n", 3631 cmd->offset); 3632 3633 if (cmd->sg_cnt < 0) 3634 BUG(); 3635 3636 if (cmd->bufflen < 0) 3637 BUG(); 3638 3639 return 0; 3640 } 3641 3642 static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd, 3643 uint32_t srr_rel_offs, int *xmit_type) 3644 { 3645 int res = 0, rel_offs; 3646 3647 rel_offs = srr_rel_offs - cmd->offset; 3648 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d", 3649 srr_rel_offs, rel_offs); 3650 3651 *xmit_type = QLA_TGT_XMIT_ALL; 3652 3653 if (rel_offs < 0) { 3654 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062, 3655 "qla_target(%d): SRR rel_offs (%d) < 0", 3656 cmd->vha->vp_idx, rel_offs); 3657 res = -1; 3658 } else if (rel_offs == cmd->bufflen) 3659 *xmit_type = QLA_TGT_XMIT_STATUS; 3660 else if (rel_offs > 0) 3661 res = qlt_set_data_offset(cmd, rel_offs); 3662 3663 return res; 3664 } 3665 3666 /* No locks, thread context */ 3667 static void qlt_handle_srr(struct scsi_qla_host *vha, 3668 struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm) 3669 { 3670 struct imm_ntfy_from_isp *ntfy = 3671 (struct imm_ntfy_from_isp *)&imm->imm_ntfy; 3672 struct qla_hw_data *ha = vha->hw; 3673 struct qla_tgt_cmd *cmd = sctio->cmd; 3674 struct se_cmd *se_cmd = 
&cmd->se_cmd; 3675 unsigned long flags; 3676 int xmit_type = 0, resp = 0; 3677 uint32_t offset; 3678 uint16_t srr_ui; 3679 3680 offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs); 3681 srr_ui = ntfy->u.isp24.srr_ui; 3682 3683 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n", 3684 cmd, srr_ui); 3685 3686 switch (srr_ui) { 3687 case SRR_IU_STATUS: 3688 spin_lock_irqsave(&ha->hardware_lock, flags); 3689 qlt_send_notify_ack(vha, ntfy, 3690 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); 3691 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3692 xmit_type = QLA_TGT_XMIT_STATUS; 3693 resp = 1; 3694 break; 3695 case SRR_IU_DATA_IN: 3696 if (!cmd->sg || !cmd->sg_cnt) { 3697 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063, 3698 "Unable to process SRR_IU_DATA_IN due to" 3699 " missing cmd->sg, state: %d\n", cmd->state); 3700 dump_stack(); 3701 goto out_reject; 3702 } 3703 if (se_cmd->scsi_status != 0) { 3704 ql_dbg(ql_dbg_tgt, vha, 0xe02a, 3705 "Rejecting SRR_IU_DATA_IN with non GOOD " 3706 "scsi_status\n"); 3707 goto out_reject; 3708 } 3709 cmd->bufflen = se_cmd->data_length; 3710 3711 if (qlt_has_data(cmd)) { 3712 if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0) 3713 goto out_reject; 3714 spin_lock_irqsave(&ha->hardware_lock, flags); 3715 qlt_send_notify_ack(vha, ntfy, 3716 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); 3717 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3718 resp = 1; 3719 } else { 3720 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064, 3721 "qla_target(%d): SRR for in data for cmd " 3722 "without them (tag %d, SCSI status %d), " 3723 "reject", vha->vp_idx, cmd->tag, 3724 cmd->se_cmd.scsi_status); 3725 goto out_reject; 3726 } 3727 break; 3728 case SRR_IU_DATA_OUT: 3729 if (!cmd->sg || !cmd->sg_cnt) { 3730 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065, 3731 "Unable to process SRR_IU_DATA_OUT due to" 3732 " missing cmd->sg\n"); 3733 dump_stack(); 3734 goto out_reject; 3735 } 3736 if (se_cmd->scsi_status != 0) { 3737 ql_dbg(ql_dbg_tgt, vha, 0xe02b, 3738 "Rejecting SRR_IU_DATA_OUT" 3739 " with non GOOD scsi_status\n"); 3740 goto out_reject; 3741 } 3742 cmd->bufflen = se_cmd->data_length; 3743 3744 if (qlt_has_data(cmd)) { 3745 if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0) 3746 goto out_reject; 3747 spin_lock_irqsave(&ha->hardware_lock, flags); 3748 qlt_send_notify_ack(vha, ntfy, 3749 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); 3750 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3751 if (xmit_type & QLA_TGT_XMIT_DATA) 3752 qlt_rdy_to_xfer(cmd); 3753 } else { 3754 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066, 3755 "qla_target(%d): SRR for out data for cmd " 3756 "without them (tag %d, SCSI status %d), " 3757 "reject", vha->vp_idx, cmd->tag, 3758 cmd->se_cmd.scsi_status); 3759 goto out_reject; 3760 } 3761 break; 3762 default: 3763 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067, 3764 "qla_target(%d): Unknown srr_ui value %x", 3765 vha->vp_idx, srr_ui); 3766 goto out_reject; 3767 } 3768 3769 /* Transmit response in case of status and data-in cases */ 3770 if (resp) 3771 qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status); 3772 3773 return; 3774 3775 out_reject: 3776 spin_lock_irqsave(&ha->hardware_lock, flags); 3777 qlt_send_notify_ack(vha, ntfy, 0, 0, 0, 3778 NOTIFY_ACK_SRR_FLAGS_REJECT, 3779 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, 3780 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); 3781 if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 3782 cmd->state = QLA_TGT_STATE_DATA_IN; 3783 dump_stack(); 3784 } else 3785 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); 3786 spin_unlock_irqrestore(&ha->hardware_lock, 
flags); 3787 } 3788 3789 static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha, 3790 struct qla_tgt_srr_imm *imm, int ha_locked) 3791 { 3792 struct qla_hw_data *ha = vha->hw; 3793 unsigned long flags = 0; 3794 3795 if (!ha_locked) 3796 spin_lock_irqsave(&ha->hardware_lock, flags); 3797 3798 qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0, 3799 NOTIFY_ACK_SRR_FLAGS_REJECT, 3800 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, 3801 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); 3802 3803 if (!ha_locked) 3804 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3805 3806 kfree(imm); 3807 } 3808 3809 static void qlt_handle_srr_work(struct work_struct *work) 3810 { 3811 struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work); 3812 struct scsi_qla_host *vha = tgt->vha; 3813 struct qla_tgt_srr_ctio *sctio; 3814 unsigned long flags; 3815 3816 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n", 3817 tgt); 3818 3819 restart: 3820 spin_lock_irqsave(&tgt->srr_lock, flags); 3821 list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) { 3822 struct qla_tgt_srr_imm *imm, *i, *ti; 3823 struct qla_tgt_cmd *cmd; 3824 struct se_cmd *se_cmd; 3825 3826 imm = NULL; 3827 list_for_each_entry_safe(i, ti, &tgt->srr_imm_list, 3828 srr_list_entry) { 3829 if (i->srr_id == sctio->srr_id) { 3830 list_del(&i->srr_list_entry); 3831 if (imm) { 3832 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068, 3833 "qla_target(%d): There must be " 3834 "only one IMM SRR per CTIO SRR " 3835 "(IMM SRR %p, id %d, CTIO %p)\n", 3836 vha->vp_idx, i, i->srr_id, sctio); 3837 qlt_reject_free_srr_imm(tgt->vha, i, 0); 3838 } else 3839 imm = i; 3840 } 3841 } 3842 3843 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a, 3844 "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio, 3845 sctio->srr_id); 3846 3847 if (imm == NULL) { 3848 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b, 3849 "No matching IMM found for SRR CTIO (id %d)\n", 3850 sctio->srr_id); 3851 continue; 3852 } else 3853 list_del(&sctio->srr_list_entry); 3854 3855 spin_unlock_irqrestore(&tgt->srr_lock, flags); 3856 3857 cmd = sctio->cmd; 3858 /* 3859 * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow 3860 * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in() 3861 * logic. 3862 */ 3863 cmd->offset = 0; 3864 if (cmd->free_sg) { 3865 kfree(cmd->sg); 3866 cmd->sg = NULL; 3867 cmd->free_sg = 0; 3868 } 3869 se_cmd = &cmd->se_cmd; 3870 3871 cmd->sg_cnt = se_cmd->t_data_nents; 3872 cmd->sg = se_cmd->t_data_sg; 3873 3874 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c, 3875 "SRR cmd %p (se_cmd %p, tag %d, op %x), " 3876 "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag, 3877 se_cmd->t_task_cdb ?
se_cmd->t_task_cdb[0] : 0, 3878 cmd->sg_cnt, cmd->offset); 3879 3880 qlt_handle_srr(vha, sctio, imm); 3881 3882 kfree(imm); 3883 kfree(sctio); 3884 goto restart; 3885 } 3886 spin_unlock_irqrestore(&tgt->srr_lock, flags); 3887 } 3888 3889 /* ha->hardware_lock supposed to be held on entry */ 3890 static void qlt_prepare_srr_imm(struct scsi_qla_host *vha, 3891 struct imm_ntfy_from_isp *iocb) 3892 { 3893 struct qla_tgt_srr_imm *imm; 3894 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 3895 struct qla_tgt_srr_ctio *sctio; 3896 3897 tgt->imm_srr_id++; 3898 3899 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02d, "qla_target(%d): SRR received\n", 3900 vha->vp_idx); 3901 3902 imm = kzalloc(sizeof(*imm), GFP_ATOMIC); 3903 if (imm != NULL) { 3904 memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy)); 3905 3906 /* IRQ is already OFF */ 3907 spin_lock(&tgt->srr_lock); 3908 imm->srr_id = tgt->imm_srr_id; 3909 list_add_tail(&imm->srr_list_entry, 3910 &tgt->srr_imm_list); 3911 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e, 3912 "IMM NTFY SRR %p added (id %d, ui %x)\n", 3913 imm, imm->srr_id, iocb->u.isp24.srr_ui); 3914 if (tgt->imm_srr_id == tgt->ctio_srr_id) { 3915 int found = 0; 3916 list_for_each_entry(sctio, &tgt->srr_ctio_list, 3917 srr_list_entry) { 3918 if (sctio->srr_id == imm->srr_id) { 3919 found = 1; 3920 break; 3921 } 3922 } 3923 if (found) { 3924 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s", 3925 "Scheduling srr work\n"); 3926 schedule_work(&tgt->srr_work); 3927 } else { 3928 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030, 3929 "qla_target(%d): imm_srr_id " 3930 "== ctio_srr_id (%d), but there is no " 3931 "corresponding SRR CTIO, deleting IMM " 3932 "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id, 3933 imm); 3934 list_del(&imm->srr_list_entry); 3935 3936 kfree(imm); 3937 3938 spin_unlock(&tgt->srr_lock); 3939 goto out_reject; 3940 } 3941 } 3942 spin_unlock(&tgt->srr_lock); 3943 } else { 3944 struct qla_tgt_srr_ctio *ts; 3945 3946 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069, 3947 "qla_target(%d): Unable to allocate SRR IMM " 3948 "entry, SRR request will be rejected\n", vha->vp_idx); 3949 3950 /* IRQ is already OFF */ 3951 spin_lock(&tgt->srr_lock); 3952 list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list, 3953 srr_list_entry) { 3954 if (sctio->srr_id == tgt->imm_srr_id) { 3955 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031, 3956 "CTIO SRR %p deleted (id %d)\n", 3957 sctio, sctio->srr_id); 3958 list_del(&sctio->srr_list_entry); 3959 qlt_send_term_exchange(vha, sctio->cmd, 3960 &sctio->cmd->atio, 1); 3961 kfree(sctio); 3962 } 3963 } 3964 spin_unlock(&tgt->srr_lock); 3965 goto out_reject; 3966 } 3967 3968 return; 3969 3970 out_reject: 3971 qlt_send_notify_ack(vha, iocb, 0, 0, 0, 3972 NOTIFY_ACK_SRR_FLAGS_REJECT, 3973 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, 3974 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); 3975 } 3976 3977 /* 3978 * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reacquire 3979 */ 3980 static void qlt_handle_imm_notify(struct scsi_qla_host *vha, 3981 struct imm_ntfy_from_isp *iocb) 3982 { 3983 struct qla_hw_data *ha = vha->hw; 3984 uint32_t add_flags = 0; 3985 int send_notify_ack = 1; 3986 uint16_t status; 3987 3988 status = le16_to_cpu(iocb->u.isp2x.status); 3989 switch (status) { 3990 case IMM_NTFY_LIP_RESET: 3991 { 3992 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032, 3993 "qla_target(%d): LIP reset (loop %#x), subcode %x\n", 3994 vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle), 3995 iocb->u.isp24.status_subcode); 3996 3997 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0) 3998 send_notify_ack = 0; 3999 break; 4000 } 4001 4002 case IMM_NTFY_LIP_LINK_REINIT: 4003 { 4004 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4005 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033, 4006 "qla_target(%d): LINK REINIT (loop %#x, " 4007 "subcode %x)\n", vha->vp_idx, 4008 le16_to_cpu(iocb->u.isp24.nport_handle), 4009 iocb->u.isp24.status_subcode); 4010 if (tgt->link_reinit_iocb_pending) { 4011 qlt_send_notify_ack(vha, &tgt->link_reinit_iocb, 4012 0, 0, 0, 0, 0, 0); 4013 } 4014 memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb)); 4015 tgt->link_reinit_iocb_pending = 1; 4016 /* 4017 * QLogic requires waiting after LINK REINIT for possible 4018 * PDISC or ADISC ELS commands 4019 */ 4020 send_notify_ack = 0; 4021 break; 4022 } 4023 4024 case IMM_NTFY_PORT_LOGOUT: 4025 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034, 4026 "qla_target(%d): Port logout (loop " 4027 "%#x, subcode %x)\n", vha->vp_idx, 4028 le16_to_cpu(iocb->u.isp24.nport_handle), 4029 iocb->u.isp24.status_subcode); 4030 4031 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0) 4032 send_notify_ack = 0; 4033 /* The sessions will be cleared in the callback, if needed */ 4034 break; 4035 4036 case IMM_NTFY_GLBL_TPRLO: 4037 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035, 4038 "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status); 4039 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0) 4040 send_notify_ack = 0; 4041 /* The sessions will be cleared in the callback, if needed */ 4042 break; 4043 4044 case IMM_NTFY_PORT_CONFIG: 4045 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036, 4046 "qla_target(%d): Port config changed (%x)\n", vha->vp_idx, 4047 status); 4048 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0) 4049 send_notify_ack = 0; 4050 /* The sessions will be cleared in the callback, if needed */ 4051 break; 4052 4053 case IMM_NTFY_GLBL_LOGO: 4054 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a, 4055 "qla_target(%d): Link failure detected\n", 4056 vha->vp_idx); 4057 /* I_T nexus loss */ 4058 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0) 4059 send_notify_ack = 0; 4060 break; 4061 4062 case IMM_NTFY_IOCB_OVERFLOW: 4063 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b, 4064 "qla_target(%d): Cannot provide requested " 4065 "capability (IOCB overflowed the immediate notify " 4066 "resource count)\n", vha->vp_idx); 4067 break; 4068 4069 case IMM_NTFY_ABORT_TASK: 4070 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037, 4071 "qla_target(%d): Abort Task (S %08x I %#x -> " 4072 "L %#x)\n", vha->vp_idx, 4073 le16_to_cpu(iocb->u.isp2x.seq_id), 4074 GET_TARGET_ID(ha, (struct atio_from_isp *)iocb), 4075 le16_to_cpu(iocb->u.isp2x.lun)); 4076 if (qlt_abort_task(vha, iocb) == 0) 4077 send_notify_ack = 0; 4078 break; 4079 4080 case IMM_NTFY_RESOURCE: 4081 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c, 4082 "qla_target(%d): Out of resources, host %ld\n", 4083 vha->vp_idx, vha->host_no); 4084 break; 4085 4086 case IMM_NTFY_MSG_RX: 4087 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038, 4088 "qla_target(%d): Immediate notify
task %x\n", 4089 vha->vp_idx, iocb->u.isp2x.task_flags); 4090 if (qlt_handle_task_mgmt(vha, iocb) == 0) 4091 send_notify_ack = 0; 4092 break; 4093 4094 case IMM_NTFY_ELS: 4095 if (qlt_24xx_handle_els(vha, iocb) == 0) 4096 send_notify_ack = 0; 4097 break; 4098 4099 case IMM_NTFY_SRR: 4100 qlt_prepare_srr_imm(vha, iocb); 4101 send_notify_ack = 0; 4102 break; 4103 4104 default: 4105 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d, 4106 "qla_target(%d): Received unknown immediate " 4107 "notify status %x\n", vha->vp_idx, status); 4108 break; 4109 } 4110 4111 if (send_notify_ack) 4112 qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0); 4113 } 4114 4115 /* 4116 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire. 4117 * This function sends busy to ISP 2xxx or 24xx. 4118 */ 4119 static void qlt_send_busy(struct scsi_qla_host *vha, 4120 struct atio_from_isp *atio, uint16_t status) 4121 { 4122 struct ctio7_to_24xx *ctio24; 4123 struct qla_hw_data *ha = vha->hw; 4124 request_t *pkt; 4125 struct qla_tgt_sess *sess = NULL; 4126 4127 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, 4128 atio->u.isp24.fcp_hdr.s_id); 4129 if (!sess) { 4130 qlt_send_term_exchange(vha, NULL, atio, 1); 4131 return; 4132 } 4133 /* Sending a marker isn't necessary, since we're called from the ISR */ 4134 4135 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); 4136 if (!pkt) { 4137 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06e, 4138 "qla_target(%d): %s failed: unable to allocate " 4139 "request packet", vha->vp_idx, __func__); 4140 return; 4141 } 4142 4143 pkt->entry_count = 1; 4144 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 4145 4146 ctio24 = (struct ctio7_to_24xx *)pkt; 4147 ctio24->entry_type = CTIO_TYPE7; 4148 ctio24->nport_handle = sess->loop_id; 4149 ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT); 4150 ctio24->vp_index = vha->vp_idx; 4151 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 4152 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 4153 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 4154 ctio24->exchange_addr = atio->u.isp24.exchange_addr; 4155 ctio24->u.status1.flags = (atio->u.isp24.attr << 9) | 4156 __constant_cpu_to_le16( 4157 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS | 4158 CTIO7_FLAGS_DONT_RET_CTIO); 4159 /* 4160 * A CTIO from the fw w/o an se_cmd doesn't provide enough info to 4161 * retry it, if explicit confirmation is used. 4162 */ 4163 ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); 4164 ctio24->u.status1.scsi_status = cpu_to_le16(status); 4165 ctio24->u.status1.residual = get_unaligned((uint32_t *) 4166 &atio->u.isp24.fcp_cmnd.add_cdb[ 4167 atio->u.isp24.fcp_cmnd.add_cdb_len]); 4168 if (ctio24->u.status1.residual != 0) 4169 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER; 4170 4171 qla2x00_start_iocbs(vha, vha->req); 4172 } 4173 4174 /* ha->hardware_lock supposed to be held on entry */ 4175 /* called via callback from qla2xxx */ 4176 static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, 4177 struct atio_from_isp *atio) 4178 { 4179 struct qla_hw_data *ha = vha->hw; 4180 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4181 int rc; 4182 4183 if (unlikely(tgt == NULL)) { 4184 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf039, 4185 "ATIO pkt, but no tgt (ha %p)", ha); 4186 return; 4187 } 4188 ql_dbg(ql_dbg_tgt, vha, 0xe02c, 4189 "qla_target(%d): ATIO pkt %p: type %02x count %02x", 4190 vha->vp_idx, atio, atio->u.raw.entry_type, 4191 atio->u.raw.entry_count); 4192 /* 4193 * In tgt_stop mode we should also allow all requests to pass.
4194 * Otherwise, some commands can get stuck. 4195 */ 4196 4197 tgt->irq_cmd_count++; 4198 4199 switch (atio->u.raw.entry_type) { 4200 case ATIO_TYPE7: 4201 ql_dbg(ql_dbg_tgt, vha, 0xe02d, 4202 "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, cdb %x, add_cdb_len %x, data_length %04x, s_id %02x%02x%02x\n", 4203 vha->vp_idx, atio->u.isp24.fcp_cmnd.lun, 4204 atio->u.isp24.fcp_cmnd.rddata, 4205 atio->u.isp24.fcp_cmnd.wrdata, 4206 atio->u.isp24.fcp_cmnd.cdb[0], 4207 atio->u.isp24.fcp_cmnd.add_cdb_len, 4208 be32_to_cpu(get_unaligned((uint32_t *) 4209 &atio->u.isp24.fcp_cmnd.add_cdb[ 4210 atio->u.isp24.fcp_cmnd.add_cdb_len])), 4211 atio->u.isp24.fcp_hdr.s_id[0], 4212 atio->u.isp24.fcp_hdr.s_id[1], 4213 atio->u.isp24.fcp_hdr.s_id[2]); 4214 4215 if (unlikely(atio->u.isp24.exchange_addr == 4216 ATIO_EXCHANGE_ADDRESS_UNKNOWN)) { 4217 ql_dbg(ql_dbg_tgt, vha, 0xe058, 4218 "qla_target(%d): ATIO_TYPE7 " 4219 "received with UNKNOWN exchange address, " 4220 "sending QUEUE_FULL\n", vha->vp_idx); 4221 qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL); 4222 break; 4223 } 4224 if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) 4225 rc = qlt_handle_cmd_for_atio(vha, atio); 4226 else 4227 rc = qlt_handle_task_mgmt(vha, atio); 4228 if (unlikely(rc != 0)) { 4229 if (rc == -ESRCH) { 4230 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ 4231 qlt_send_busy(vha, atio, SAM_STAT_BUSY); 4232 #else 4233 qlt_send_term_exchange(vha, NULL, atio, 1); 4234 #endif 4235 } else { 4236 if (tgt->tgt_stop) { 4237 ql_dbg(ql_dbg_tgt, vha, 0xe059, 4238 "qla_target: Unable to send " 4239 "command to target for req, " 4240 "ignoring.\n"); 4241 } else { 4242 ql_dbg(ql_dbg_tgt, vha, 0xe05a, 4243 "qla_target(%d): Unable to send " 4244 "command to target, sending BUSY " 4245 "status.\n", vha->vp_idx); 4246 qlt_send_busy(vha, atio, SAM_STAT_BUSY); 4247 } 4248 } 4249 } 4250 break; 4251 4252 case IMMED_NOTIFY_TYPE: 4253 { 4254 if (unlikely(atio->u.isp2x.entry_status != 0)) { 4255 ql_dbg(ql_dbg_tgt, vha, 0xe05b, 4256 "qla_target(%d): Received ATIO packet %x " 4257 "with error status %x\n", vha->vp_idx, 4258 atio->u.raw.entry_type, 4259 atio->u.isp2x.entry_status); 4260 break; 4261 } 4262 ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO"); 4263 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio); 4264 break; 4265 } 4266 4267 default: 4268 ql_dbg(ql_dbg_tgt, vha, 0xe05c, 4269 "qla_target(%d): Received unknown ATIO packet " 4270 "type %x\n", vha->vp_idx, atio->u.raw.entry_type); 4271 break; 4272 } 4273 4274 tgt->irq_cmd_count--; 4275 } 4276 4277 /* ha->hardware_lock supposed to be held on entry */ 4278 /* called via callback from qla2xxx */ 4279 static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) 4280 { 4281 struct qla_hw_data *ha = vha->hw; 4282 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4283 4284 if (unlikely(tgt == NULL)) { 4285 ql_dbg(ql_dbg_tgt, vha, 0xe05d, 4286 "qla_target(%d): Response pkt %x received, but no " 4287 "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha); 4288 return; 4289 } 4290 4291 ql_dbg(ql_dbg_tgt, vha, 0xe02f, 4292 "qla_target(%d): response pkt %p: T %02x C %02x S %02x " 4293 "handle %#x\n", vha->vp_idx, pkt, pkt->entry_type, 4294 pkt->entry_count, pkt->entry_status, pkt->handle); 4295 4296 /* 4297 * In tgt_stop mode we should also allow all requests to pass. 4298 * Otherwise, some commands can get stuck.
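 * (qlt_stop_phase1() spins until irq_cmd_count falls back to zero before it proceeds with teardown, so taking the count below keeps tgt stop/release from racing with IRQ-context packet handling.)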
4299 */ 4300 4301 tgt->irq_cmd_count++; 4302 4303 switch (pkt->entry_type) { 4304 case CTIO_CRC2: 4305 case CTIO_TYPE7: 4306 { 4307 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; 4308 ql_dbg(ql_dbg_tgt, vha, 0xe030, 4309 "CTIO[0x%x] 12/CTIO7 7A/CRC2: instance %d\n", 4310 entry->entry_type, vha->vp_idx); 4311 qlt_do_ctio_completion(vha, entry->handle, 4312 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 4313 entry); 4314 break; 4315 } 4316 4317 case ACCEPT_TGT_IO_TYPE: 4318 { 4319 struct atio_from_isp *atio = (struct atio_from_isp *)pkt; 4320 int rc; 4321 ql_dbg(ql_dbg_tgt, vha, 0xe031, 4322 "ACCEPT_TGT_IO instance %d status %04x " 4323 "lun %04x read/write %d data_length %04x " 4324 "target_id %02x rx_id %04x\n ", vha->vp_idx, 4325 le16_to_cpu(atio->u.isp2x.status), 4326 le16_to_cpu(atio->u.isp2x.lun), 4327 atio->u.isp2x.execution_codes, 4328 le32_to_cpu(atio->u.isp2x.data_length), GET_TARGET_ID(ha, 4329 atio), atio->u.isp2x.rx_id); 4330 if (atio->u.isp2x.status != 4331 __constant_cpu_to_le16(ATIO_CDB_VALID)) { 4332 ql_dbg(ql_dbg_tgt, vha, 0xe05e, 4333 "qla_target(%d): ATIO with error " 4334 "status %x received\n", vha->vp_idx, 4335 le16_to_cpu(atio->u.isp2x.status)); 4336 break; 4337 } 4338 ql_dbg(ql_dbg_tgt, vha, 0xe032, 4339 "FCP CDB: 0x%02x, sizeof(cdb): %lu", 4340 atio->u.isp2x.cdb[0], (unsigned long 4341 int)sizeof(atio->u.isp2x.cdb)); 4342 4343 rc = qlt_handle_cmd_for_atio(vha, atio); 4344 if (unlikely(rc != 0)) { 4345 if (rc == -ESRCH) { 4346 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ 4347 qlt_send_busy(vha, atio, 0); 4348 #else 4349 qlt_send_term_exchange(vha, NULL, atio, 1); 4350 #endif 4351 } else { 4352 if (tgt->tgt_stop) { 4353 ql_dbg(ql_dbg_tgt, vha, 0xe05f, 4354 "qla_target: Unable to send " 4355 "command to target, sending TERM " 4356 "EXCHANGE for rsp\n"); 4357 qlt_send_term_exchange(vha, NULL, 4358 atio, 1); 4359 } else { 4360 ql_dbg(ql_dbg_tgt, vha, 0xe060, 4361 "qla_target(%d): Unable to send " 4362 "command to target, sending BUSY " 4363 "status\n", vha->vp_idx); 4364 qlt_send_busy(vha, atio, 0); 4365 } 4366 } 4367 } 4368 } 4369 break; 4370 4371 case CONTINUE_TGT_IO_TYPE: 4372 { 4373 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; 4374 ql_dbg(ql_dbg_tgt, vha, 0xe033, 4375 "CONTINUE_TGT_IO: instance %d\n", vha->vp_idx); 4376 qlt_do_ctio_completion(vha, entry->handle, 4377 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 4378 entry); 4379 break; 4380 } 4381 4382 case CTIO_A64_TYPE: 4383 { 4384 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; 4385 ql_dbg(ql_dbg_tgt, vha, 0xe034, "CTIO_A64: instance %d\n", 4386 vha->vp_idx); 4387 qlt_do_ctio_completion(vha, entry->handle, 4388 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 4389 entry); 4390 break; 4391 } 4392 4393 case IMMED_NOTIFY_TYPE: 4394 ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n"); 4395 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt); 4396 break; 4397 4398 case NOTIFY_ACK_TYPE: 4399 if (tgt->notify_ack_expected > 0) { 4400 struct nack_to_isp *entry = (struct nack_to_isp *)pkt; 4401 ql_dbg(ql_dbg_tgt, vha, 0xe036, 4402 "NOTIFY_ACK seq %08x status %x\n", 4403 le16_to_cpu(entry->u.isp2x.seq_id), 4404 le16_to_cpu(entry->u.isp2x.status)); 4405 tgt->notify_ack_expected--; 4406 if (entry->u.isp2x.status != 4407 __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) { 4408 ql_dbg(ql_dbg_tgt, vha, 0xe061, 4409 "qla_target(%d): NOTIFY_ACK " 4410 "failed %x\n", vha->vp_idx, 4411 le16_to_cpu(entry->u.isp2x.status)); 4412 } 4413 } else { 4414 
ql_dbg(ql_dbg_tgt, vha, 0xe062, 4415 "qla_target(%d): Unexpected NOTIFY_ACK received\n", 4416 vha->vp_idx); 4417 } 4418 break; 4419 4420 case ABTS_RECV_24XX: 4421 ql_dbg(ql_dbg_tgt, vha, 0xe037, 4422 "ABTS_RECV_24XX: instance %d\n", vha->vp_idx); 4423 qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt); 4424 break; 4425 4426 case ABTS_RESP_24XX: 4427 if (tgt->abts_resp_expected > 0) { 4428 struct abts_resp_from_24xx_fw *entry = 4429 (struct abts_resp_from_24xx_fw *)pkt; 4430 ql_dbg(ql_dbg_tgt, vha, 0xe038, 4431 "ABTS_RESP_24XX: compl_status %x\n", 4432 entry->compl_status); 4433 tgt->abts_resp_expected--; 4434 if (le16_to_cpu(entry->compl_status) != 4435 ABTS_RESP_COMPL_SUCCESS) { 4436 if ((entry->error_subcode1 == 0x1E) && 4437 (entry->error_subcode2 == 0)) { 4438 /* 4439 * We've got a race here: the aborted 4440 * exchange was not terminated, i.e. a 4441 * response for the aborted command was 4442 * sent between the time the abort 4443 * request was received and processed. 4444 * Unfortunately, the firmware has a 4445 * silly requirement that all aborted 4446 * exchanges must be explicitly 4447 * terminated, otherwise it refuses to 4448 * send responses for the abort 4449 * requests. So, we have to 4450 * (re)terminate the exchange and retry 4451 * the abort response. 4452 */ 4453 qlt_24xx_retry_term_exchange(vha, 4454 entry); 4455 } else 4456 ql_dbg(ql_dbg_tgt, vha, 0xe063, 4457 "qla_target(%d): ABTS_RESP_24XX " 4458 "failed %x (subcode %x:%x)", 4459 vha->vp_idx, entry->compl_status, 4460 entry->error_subcode1, 4461 entry->error_subcode2); 4462 } 4463 } else { 4464 ql_dbg(ql_dbg_tgt, vha, 0xe064, 4465 "qla_target(%d): Unexpected ABTS_RESP_24XX " 4466 "received\n", vha->vp_idx); 4467 } 4468 break; 4469 4470 default: 4471 ql_dbg(ql_dbg_tgt, vha, 0xe065, 4472 "qla_target(%d): Received unknown response pkt " 4473 "type %x\n", vha->vp_idx, pkt->entry_type); 4474 break; 4475 } 4476 4477 tgt->irq_cmd_count--; 4478 } 4479 4480 /* 4481 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire. 4482 */ 4483 void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, 4484 uint16_t *mailbox) 4485 { 4486 struct qla_hw_data *ha = vha->hw; 4487 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4488 int login_code; 4489 4490 ql_dbg(ql_dbg_tgt, vha, 0xe039, 4491 "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n", 4492 vha->host_no, atomic_read(&vha->loop_state), vha->flags.init_done, 4493 ha->operating_mode, ha->current_topology); 4494 4495 if (!ha->tgt.tgt_ops) 4496 return; 4497 4498 if (unlikely(tgt == NULL)) { 4499 ql_dbg(ql_dbg_tgt, vha, 0xe03a, 4500 "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha); 4501 return; 4502 } 4503 4504 if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) && 4505 IS_QLA2100(ha)) 4506 return; 4507 /* 4508 * In tgt_stop mode we should also allow all requests to pass. 4509 * Otherwise, some commands can get stuck. 4510 */ 4511 4512 tgt->irq_cmd_count++; 4513 4514 switch (code) { 4515 case MBA_RESET: /* Reset */ 4516 case MBA_SYSTEM_ERR: /* System Error */ 4517 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 4518 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 4519 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a, 4520 "qla_target(%d): System error async event %#x " 4521 "occurred", vha->vp_idx, code); 4522 break; 4523 case MBA_WAKEUP_THRES: /* Request Queue Wake-up.
*/ 4524 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 4525 break; 4526 4527 case MBA_LOOP_UP: 4528 { 4529 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b, 4530 "qla_target(%d): Async LOOP_UP occurred " 4531 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, 4532 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), 4533 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); 4534 if (tgt->link_reinit_iocb_pending) { 4535 qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb, 4536 0, 0, 0, 0, 0, 0); 4537 tgt->link_reinit_iocb_pending = 0; 4538 } 4539 break; 4540 } 4541 4542 case MBA_LIP_OCCURRED: 4543 case MBA_LOOP_DOWN: 4544 case MBA_LIP_RESET: 4545 case MBA_RSCN_UPDATE: 4546 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c, 4547 "qla_target(%d): Async event %#x occurred " 4548 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code, 4549 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), 4550 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); 4551 break; 4552 4553 case MBA_PORT_UPDATE: 4554 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d, 4555 "qla_target(%d): Port update async event %#x " 4556 "occurred: updating the ports database (m[0]=%x, m[1]=%x, " 4557 "m[2]=%x, m[3]=%x)", vha->vp_idx, code, 4558 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), 4559 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); 4560 4561 login_code = le16_to_cpu(mailbox[2]); 4562 if (login_code == 0x4) 4563 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e, 4564 "Async MB 2: Got PLOGI Complete\n"); 4565 else if (login_code == 0x7) 4566 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f, 4567 "Async MB 2: Port Logged Out\n"); 4568 break; 4569 4570 default: 4571 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040, 4572 "qla_target(%d): Async event %#x occurred: " 4573 "ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, 4574 code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), 4575 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); 4576 break; 4577 } 4578 4579 tgt->irq_cmd_count--; 4580 } 4581 4582 static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, 4583 uint16_t loop_id) 4584 { 4585 fc_port_t *fcport; 4586 int rc; 4587 4588 fcport = kzalloc(sizeof(*fcport), GFP_KERNEL); 4589 if (!fcport) { 4590 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f, 4591 "qla_target(%d): Allocation of tmp FC port failed", 4592 vha->vp_idx); 4593 return NULL; 4594 } 4595 4596 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf041, "loop_id %d", loop_id); 4597 4598 fcport->loop_id = loop_id; 4599 4600 rc = qla2x00_get_port_database(vha, fcport, 0); 4601 if (rc != QLA_SUCCESS) { 4602 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070, 4603 "qla_target(%d): Failed to retrieve fcport " 4604 "information -- get_port_database() returned %x " 4605 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id); 4606 kfree(fcport); 4607 return NULL; 4608 } 4609 4610 return fcport; 4611 } 4612 4613 /* Must be called under tgt_mutex */ 4614 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha, 4615 uint8_t *s_id) 4616 { 4617 struct qla_tgt_sess *sess = NULL; 4618 fc_port_t *fcport = NULL; 4619 int rc, global_resets; 4620 uint16_t loop_id = 0; 4621 4622 retry: 4623 global_resets = 4624 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count); 4625 4626 rc = qla24xx_get_loop_id(vha, s_id, &loop_id); 4627 if (rc != 0) { 4628 if ((s_id[0] == 0xFF) && 4629 (s_id[1] == 0xFC)) { 4630 /* 4631 * This is Domain Controller, so it should be 4632 * OK to drop SCSI commands from it. 
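 * (S_ID FF:FC:XX is the Fibre Channel well-known address block reserved for the fabric Domain Controller, XX being the domain ID.)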
4633 */ 4634 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042, 4635 "Unable to find initiator with S_ID %x:%x:%x", 4636 s_id[0], s_id[1], s_id[2]); 4637 } else 4638 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071, 4639 "qla_target(%d): Unable to find " 4640 "initiator with S_ID %x:%x:%x", 4641 vha->vp_idx, s_id[0], s_id[1], 4642 s_id[2]); 4643 return NULL; 4644 } 4645 4646 fcport = qlt_get_port_database(vha, loop_id); 4647 if (!fcport) 4648 return NULL; 4649 4650 if (global_resets != 4651 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) { 4652 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043, 4653 "qla_target(%d): global reset during session discovery " 4654 "(counter was %d, new %d), retrying", vha->vp_idx, 4655 global_resets, 4656 atomic_read(&vha->vha_tgt. 4657 qla_tgt->tgt_global_resets_count)); 4658 goto retry; 4659 } 4660 4661 sess = qlt_create_sess(vha, fcport, true); 4662 4663 kfree(fcport); 4664 return sess; 4665 } 4666 4667 static void qlt_abort_work(struct qla_tgt *tgt, 4668 struct qla_tgt_sess_work_param *prm) 4669 { 4670 struct scsi_qla_host *vha = tgt->vha; 4671 struct qla_hw_data *ha = vha->hw; 4672 struct qla_tgt_sess *sess = NULL; 4673 unsigned long flags; 4675 uint8_t s_id[3]; 4676 int rc; 4677 4678 spin_lock_irqsave(&ha->hardware_lock, flags); 4679 4680 if (tgt->tgt_stop) 4681 goto out_term; 4682 4683 s_id[0] = prm->abts.fcp_hdr_le.s_id[2]; 4684 s_id[1] = prm->abts.fcp_hdr_le.s_id[1]; 4685 s_id[2] = prm->abts.fcp_hdr_le.s_id[0]; 4686 4687 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); 4689 if (!sess) { 4690 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4691 4692 mutex_lock(&vha->vha_tgt.tgt_mutex); 4693 sess = qlt_make_local_sess(vha, s_id); 4694 /* sess has an extra creation ref */ 4695 mutex_unlock(&vha->vha_tgt.tgt_mutex); 4696 4697 spin_lock_irqsave(&ha->hardware_lock, flags); 4698 if (!sess) 4699 goto out_term; 4700 } else { 4701 kref_get(&sess->se_sess->sess_kref); 4702 } 4703 4704 if (tgt->tgt_stop) 4705 goto out_term; 4706 4707 rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess); 4708 if (rc != 0) 4709 goto out_term; 4710 4711 ha->tgt.tgt_ops->put_sess(sess); 4712 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4713 return; 4714 4715 out_term: 4716 qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false); 4717 if (sess) 4718 ha->tgt.tgt_ops->put_sess(sess); 4719 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4720 } 4721 4722 static void qlt_tmr_work(struct qla_tgt *tgt, 4723 struct qla_tgt_sess_work_param *prm) 4724 { 4725 struct atio_from_isp *a = &prm->tm_iocb2; 4726 struct scsi_qla_host *vha = tgt->vha; 4727 struct qla_hw_data *ha = vha->hw; 4728 struct qla_tgt_sess *sess = NULL; 4729 unsigned long flags; 4730 uint8_t *s_id = NULL; /* to hide compiler warnings */ 4731 int rc; 4732 uint32_t lun, unpacked_lun; 4733 int lun_size, fn; 4734 void *iocb; 4735 4736 spin_lock_irqsave(&ha->hardware_lock, flags); 4737 4738 if (tgt->tgt_stop) 4739 goto out_term; 4740 4741 s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id; 4742 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); 4743 if (!sess) { 4744 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4745 4746 mutex_lock(&vha->vha_tgt.tgt_mutex); 4747 sess = qlt_make_local_sess(vha, s_id); 4748 /* sess has an extra creation ref */ 4749 mutex_unlock(&vha->vha_tgt.tgt_mutex); 4750 4751 spin_lock_irqsave(&ha->hardware_lock, flags); 4752 if (!sess) 4753 goto out_term; 4754 } else { 4755 kref_get(&sess->se_sess->sess_kref); 4756 } 4757 4758 iocb = a; 4759 lun =
a->u.isp24.fcp_cmnd.lun; 4760 lun_size = sizeof(lun); 4761 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags; 4762 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); 4763 4764 rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); 4765 if (rc != 0) 4766 goto out_term; 4767 4768 ha->tgt.tgt_ops->put_sess(sess); 4769 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4770 return; 4771 4772 out_term: 4773 qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1); 4774 if (sess) 4775 ha->tgt.tgt_ops->put_sess(sess); 4776 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4777 } 4778 4779 static void qlt_sess_work_fn(struct work_struct *work) 4780 { 4781 struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work); 4782 struct scsi_qla_host *vha = tgt->vha; 4783 unsigned long flags; 4784 4785 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt); 4786 4787 spin_lock_irqsave(&tgt->sess_work_lock, flags); 4788 while (!list_empty(&tgt->sess_works_list)) { 4789 struct qla_tgt_sess_work_param *prm = list_entry( 4790 tgt->sess_works_list.next, typeof(*prm), 4791 sess_works_list_entry); 4792 4793 /* 4794 * This work can be scheduled on several CPUs at a time, so we 4795 * must delete the entry to eliminate double processing 4796 */ 4797 list_del(&prm->sess_works_list_entry); 4798 4799 spin_unlock_irqrestore(&tgt->sess_work_lock, flags); 4800 4801 switch (prm->type) { 4802 case QLA_TGT_SESS_WORK_ABORT: 4803 qlt_abort_work(tgt, prm); 4804 break; 4805 case QLA_TGT_SESS_WORK_TM: 4806 qlt_tmr_work(tgt, prm); 4807 break; 4808 default: 4809 BUG_ON(1); 4810 break; 4811 } 4812 4813 spin_lock_irqsave(&tgt->sess_work_lock, flags); 4814 4815 kfree(prm); 4816 } 4817 spin_unlock_irqrestore(&tgt->sess_work_lock, flags); 4818 } 4819 4820 /* Must be called under tgt_host_action_mutex */ 4821 int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) 4822 { 4823 struct qla_tgt *tgt; 4824 4825 if (!QLA_TGT_MODE_ENABLED()) 4826 return 0; 4827 4828 if (!IS_TGT_MODE_CAPABLE(ha)) { 4829 ql_log(ql_log_warn, base_vha, 0xe070, 4830 "This adapter does not support target mode.\n"); 4831 return 0; 4832 } 4833 4834 ql_dbg(ql_dbg_tgt, base_vha, 0xe03b, 4835 "Registering target for host %ld(%p).\n", base_vha->host_no, ha); 4836 4837 BUG_ON(base_vha->vha_tgt.qla_tgt != NULL); 4838 4839 tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL); 4840 if (!tgt) { 4841 ql_dbg(ql_dbg_tgt, base_vha, 0xe066, 4842 "Unable to allocate struct qla_tgt\n"); 4843 return -ENOMEM; 4844 } 4845 4846 if (!(base_vha->host->hostt->supported_mode & MODE_TARGET)) 4847 base_vha->host->hostt->supported_mode |= MODE_TARGET; 4848 4849 tgt->ha = ha; 4850 tgt->vha = base_vha; 4851 init_waitqueue_head(&tgt->waitQ); 4852 INIT_LIST_HEAD(&tgt->sess_list); 4853 INIT_LIST_HEAD(&tgt->del_sess_list); 4854 INIT_DELAYED_WORK(&tgt->sess_del_work, 4855 (void (*)(struct work_struct *))qlt_del_sess_work_fn); 4856 spin_lock_init(&tgt->sess_work_lock); 4857 INIT_WORK(&tgt->sess_work, qlt_sess_work_fn); 4858 INIT_LIST_HEAD(&tgt->sess_works_list); 4859 spin_lock_init(&tgt->srr_lock); 4860 INIT_LIST_HEAD(&tgt->srr_ctio_list); 4861 INIT_LIST_HEAD(&tgt->srr_imm_list); 4862 INIT_WORK(&tgt->srr_work, qlt_handle_srr_work); 4863 atomic_set(&tgt->tgt_global_resets_count, 0); 4864 4865 base_vha->vha_tgt.qla_tgt = tgt; 4866 4867 ql_dbg(ql_dbg_tgt, base_vha, 0xe067, 4868 "qla_target(%d): using 64 Bit PCI addressing", 4869 base_vha->vp_idx); 4870 tgt->tgt_enable_64bit_addr = 1; 4871 /* 3 is reserved */ 4872 tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
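 /* A CTIO type 7 IOCB carries a single data segment inline; any further segments are shipped in continuation IOCBs, which is what the per-cmd and per-continuation counts below describe. */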
4873 tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX; 4874 tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX; 4875 4876 if (base_vha->fc_vport) 4877 return 0; 4878 4879 mutex_lock(&qla_tgt_mutex); 4880 list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist); 4881 mutex_unlock(&qla_tgt_mutex); 4882 4883 return 0; 4884 } 4885 4886 /* Must be called under tgt_host_action_mutex */ 4887 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha) 4888 { 4889 if (!vha->vha_tgt.qla_tgt) 4890 return 0; 4891 4892 if (vha->fc_vport) { 4893 qlt_release(vha->vha_tgt.qla_tgt); 4894 return 0; 4895 } 4896 mutex_lock(&qla_tgt_mutex); 4897 list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry); 4898 mutex_unlock(&qla_tgt_mutex); 4899 4900 ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)", 4901 vha->host_no, ha); 4902 qlt_release(vha->vha_tgt.qla_tgt); 4903 4904 return 0; 4905 } 4906 4907 static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn, 4908 unsigned char *b) 4909 { 4910 int i; 4911 4912 pr_debug("qla2xxx HW vha->node_name: "); 4913 for (i = 0; i < WWN_SIZE; i++) 4914 pr_debug("%02x ", vha->node_name[i]); 4915 pr_debug("\n"); 4916 pr_debug("qla2xxx HW vha->port_name: "); 4917 for (i = 0; i < WWN_SIZE; i++) 4918 pr_debug("%02x ", vha->port_name[i]); 4919 pr_debug("\n"); 4920 4921 pr_debug("qla2xxx passed configfs WWPN: "); 4922 put_unaligned_be64(wwpn, b); 4923 for (i = 0; i < WWN_SIZE; i++) 4924 pr_debug("%02x ", b[i]); 4925 pr_debug("\n"); 4926 } 4927 4928 /** 4929 * qlt_lport_register - register lport with external module 4930 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data 4931 * @phys_wwpn: passed FC target WWPN of the physical port 4932 * @npiv_wwpn: NPIV WWPN, or 0 when registering the physical port 4933 * @npiv_wwnn: NPIV WWNN, or 0 when registering the physical port 4934 * @callback: lport initialization callback for tcm_qla2xxx code 4935 */ 4936 int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn, 4937 u64 npiv_wwpn, u64 npiv_wwnn, 4938 int (*callback)(struct scsi_qla_host *, void *, u64, u64)) 4939 { 4940 struct qla_tgt *tgt; 4941 struct scsi_qla_host *vha; 4942 struct qla_hw_data *ha; 4943 struct Scsi_Host *host; 4944 unsigned long flags; 4945 int rc; 4946 u8 b[WWN_SIZE]; 4947 4948 mutex_lock(&qla_tgt_mutex); 4949 list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) { 4950 vha = tgt->vha; 4951 ha = vha->hw; 4952 4953 host = vha->host; 4954 if (!host) 4955 continue; 4956 4957 if (!(host->hostt->supported_mode & MODE_TARGET)) 4958 continue; 4959 4960 spin_lock_irqsave(&ha->hardware_lock, flags); 4961 if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) { 4962 pr_debug("MODE_TARGET already active on qla2xxx(%d)\n", 4963 host->host_no); 4964 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4965 continue; 4966 } 4967 if (tgt->tgt_stop) { 4968 pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n", 4969 host->host_no); 4970 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4971 continue; 4972 } 4973 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4974 4975 if (!scsi_host_get(host)) { 4976 ql_dbg(ql_dbg_tgt, vha, 0xe068, 4977 "Unable to scsi_host_get() for" 4978 " qla2xxx scsi_host\n"); 4979 continue; 4980 } 4981 qlt_lport_dump(vha, phys_wwpn, b); 4982 4983 if (memcmp(vha->port_name, b, WWN_SIZE)) { 4984 scsi_host_put(host); 4985 continue; 4986 } 4987 rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn); 4988 if (rc != 0) 4989 scsi_host_put(host); 4990 4991 mutex_unlock(&qla_tgt_mutex); 4992 return rc; 4993 } 4994 mutex_unlock(&qla_tgt_mutex); 4995 4996 return -ENODEV; 4997 } 4998
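/* Typical caller flow, sketched after the tcm_qla2xxx consumer of this interface (the callback and lport names below are illustrative only): static int my_lport_cb(struct scsi_qla_host *vha, void *lport_ptr, u64 npiv_wwpn, u64 npiv_wwnn) { struct my_lport *lport = lport_ptr; lport->vha = vha; (remember the matched physical host) return 0; (a non-zero return makes the core drop the scsi_host reference again) } ... rc = qlt_lport_register(lport, wwpn, 0, 0, my_lport_cb); if (rc < 0) return rc; */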
EXPORT_SYMBOL(qlt_lport_register); 4999 5000 /** 5001 * qlt_lport_deregister - deregister lport 5002 * 5003 * @vha: Registered scsi_qla_host pointer 5004 */ 5005 void qlt_lport_deregister(struct scsi_qla_host *vha) 5006 { 5007 struct qla_hw_data *ha = vha->hw; 5008 struct Scsi_Host *sh = vha->host; 5009 /* 5010 * Clear the target_lport_ptr and the qla_tgt_func_tmpl pointer in qla_hw_data 5011 */ 5012 vha->vha_tgt.target_lport_ptr = NULL; 5013 ha->tgt.tgt_ops = NULL; 5014 /* 5015 * Release the Scsi_Host reference for the underlying qla2xxx host 5016 */ 5017 scsi_host_put(sh); 5018 } 5019 EXPORT_SYMBOL(qlt_lport_deregister); 5020 5021 /* Must be called under HW lock */ 5022 void qlt_set_mode(struct scsi_qla_host *vha) 5023 { 5024 struct qla_hw_data *ha = vha->hw; 5025 5026 switch (ql2x_ini_mode) { 5027 case QLA2XXX_INI_MODE_DISABLED: 5028 case QLA2XXX_INI_MODE_EXCLUSIVE: 5029 vha->host->active_mode = MODE_TARGET; 5030 break; 5031 case QLA2XXX_INI_MODE_ENABLED: 5032 vha->host->active_mode |= MODE_TARGET; 5033 break; 5034 default: 5035 break; 5036 } 5037 5038 if (ha->tgt.ini_mode_force_reverse) 5039 qla_reverse_ini_mode(vha); 5040 } 5041 5042 /* Must be called under HW lock */ 5043 void qlt_clear_mode(struct scsi_qla_host *vha) 5044 { 5045 struct qla_hw_data *ha = vha->hw; 5046 5047 switch (ql2x_ini_mode) { 5048 case QLA2XXX_INI_MODE_DISABLED: 5049 vha->host->active_mode = MODE_UNKNOWN; 5050 break; 5051 case QLA2XXX_INI_MODE_EXCLUSIVE: 5052 vha->host->active_mode = MODE_INITIATOR; 5053 break; 5054 case QLA2XXX_INI_MODE_ENABLED: 5055 vha->host->active_mode &= ~MODE_TARGET; 5056 break; 5057 default: 5058 break; 5059 } 5060 5061 if (ha->tgt.ini_mode_force_reverse) 5062 qla_reverse_ini_mode(vha); 5063 } 5064 5065 /* 5066 * qlt_enable_vha - NO LOCK HELD 5067 * 5068 * host_reset, bring up w/ Target Mode Enabled 5069 */ 5070 void 5071 qlt_enable_vha(struct scsi_qla_host *vha) 5072 { 5073 struct qla_hw_data *ha = vha->hw; 5074 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5075 unsigned long flags; 5076 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 5077 5078 if (!tgt) { 5079 ql_dbg(ql_dbg_tgt, vha, 0xe069, 5080 "Unable to locate qla_tgt pointer from" 5081 " struct qla_hw_data\n"); 5082 dump_stack(); 5083 return; 5084 } 5085 5086 spin_lock_irqsave(&ha->hardware_lock, flags); 5087 tgt->tgt_stopped = 0; 5088 qlt_set_mode(vha); 5089 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5090 5091 if (vha->vp_idx) { 5092 qla24xx_disable_vp(vha); 5093 qla24xx_enable_vp(vha); 5094 } else { 5095 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); 5096 qla2xxx_wake_dpc(base_vha); 5097 qla2x00_wait_for_hba_online(base_vha); 5098 } 5099 } 5100 EXPORT_SYMBOL(qlt_enable_vha); 5101 5102 /* 5103 * qlt_disable_vha - NO LOCK HELD 5104 * 5105 * Disable Target Mode and reset the adapter 5106 */ 5107 void 5108 qlt_disable_vha(struct scsi_qla_host *vha) 5109 { 5110 struct qla_hw_data *ha = vha->hw; 5111 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5112 unsigned long flags; 5113 5114 if (!tgt) { 5115 ql_dbg(ql_dbg_tgt, vha, 0xe06a, 5116 "Unable to locate qla_tgt pointer from" 5117 " struct qla_hw_data\n"); 5118 dump_stack(); 5119 return; 5120 } 5121 5122 spin_lock_irqsave(&ha->hardware_lock, flags); 5123 qlt_clear_mode(vha); 5124 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5125 5126 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 5127 qla2xxx_wake_dpc(vha); 5128 qla2x00_wait_for_hba_online(vha); 5129 } 5130 5131 /* 5132 * Called from qla_init.c:qla24xx_vport_create() context to setup 5133 * the target mode specific
struct scsi_qla_host and struct qla_hw_data 5134 * members. 5135 */ 5136 void 5137 qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha) 5138 { 5139 if (!qla_tgt_mode_enabled(vha)) 5140 return; 5141 5142 vha->vha_tgt.qla_tgt = NULL; 5143 5144 mutex_init(&vha->vha_tgt.tgt_mutex); 5145 mutex_init(&vha->vha_tgt.tgt_host_action_mutex); 5146 5147 qlt_clear_mode(vha); 5148 5149 /* 5150 * NOTE: Currently the value is kept the same for <24xx and 5151 * >=24xx ISPs. If it is necessary to change it, 5152 * the check should be added for specific ISPs, 5153 * assigning the value appropriately. 5154 */ 5155 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 5156 5157 qlt_add_target(ha, vha); 5158 } 5159 5160 void 5161 qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req) 5162 { 5163 /* 5164 * FC-4 Feature bit 0 indicates target functionality to the name server. 5165 */ 5166 if (qla_tgt_mode_enabled(vha)) { 5167 if (qla_ini_mode_enabled(vha)) 5168 ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1; 5169 else 5170 ct_req->req.rff_id.fc4_feature = BIT_0; 5171 } else if (qla_ini_mode_enabled(vha)) { 5172 ct_req->req.rff_id.fc4_feature = BIT_1; 5173 } 5174 } 5175 5176 /* 5177 * qlt_init_atio_q_entries() - Initializes ATIO queue entries. 5178 * @vha: SCSI driver HA context 5179 * 5180 * Beginning of ATIO ring has initialization control block already built 5181 * by nvram config routine. 5182 */ 5185 void 5186 qlt_init_atio_q_entries(struct scsi_qla_host *vha) 5187 { 5188 struct qla_hw_data *ha = vha->hw; 5189 uint16_t cnt; 5190 struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring; 5191 5192 if (!qla_tgt_mode_enabled(vha)) 5193 return; 5194 5195 for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) { 5196 pkt->u.raw.signature = ATIO_PROCESSED; 5197 pkt++; 5198 } 5199 5200 } 5201 5202 /* 5203 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
5204 * @vha: SCSI driver HA context 5205 */ 5206 void 5207 qlt_24xx_process_atio_queue(struct scsi_qla_host *vha) 5208 { 5209 struct qla_hw_data *ha = vha->hw; 5210 struct atio_from_isp *pkt; 5211 int cnt, i; 5212 5213 if (!vha->flags.online) 5214 return; 5215 5216 while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) { 5217 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; 5218 cnt = pkt->u.raw.entry_count; 5219 5220 qlt_24xx_atio_pkt_all_vps(vha, pkt); 5221 5222 for (i = 0; i < cnt; i++) { 5223 ha->tgt.atio_ring_index++; 5224 if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) { 5225 ha->tgt.atio_ring_index = 0; 5226 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring; 5227 } else 5228 ha->tgt.atio_ring_ptr++; 5229 5230 pkt->u.raw.signature = ATIO_PROCESSED; 5231 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; 5232 } 5233 wmb(); 5234 } 5235 5236 /* Adjust ring index */ 5237 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index); 5238 } 5239 5240 void 5241 qlt_24xx_config_rings(struct scsi_qla_host *vha) 5242 { 5243 struct qla_hw_data *ha = vha->hw; 5244 if (!QLA_TGT_MODE_ENABLED()) 5245 return; 5246 5247 WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0); 5248 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0); 5249 RD_REG_DWORD(ISP_ATIO_Q_OUT(vha)); 5250 5251 if (IS_ATIO_MSIX_CAPABLE(ha)) { 5252 struct qla_msix_entry *msix = &ha->msix_entries[2]; 5253 struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb; 5254 5255 icb->msix_atio = cpu_to_le16(msix->entry); 5256 ql_dbg(ql_dbg_init, vha, 0xf072, 5257 "Registering ICB vector 0x%x for ATIO queue.\n", 5258 msix->entry); 5259 } 5260 } 5261 5262 void 5263 qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) 5264 { 5265 struct qla_hw_data *ha = vha->hw; 5266 5267 if (qla_tgt_mode_enabled(vha)) { 5268 if (!ha->tgt.saved_set) { 5269 /* We save only once */ 5270 ha->tgt.saved_exchange_count = nv->exchange_count; 5271 ha->tgt.saved_firmware_options_1 = 5272 nv->firmware_options_1; 5273 ha->tgt.saved_firmware_options_2 = 5274 nv->firmware_options_2; 5275 ha->tgt.saved_firmware_options_3 = 5276 nv->firmware_options_3; 5277 ha->tgt.saved_set = 1; 5278 } 5279 5280 nv->exchange_count = __constant_cpu_to_le16(0xFFFF); 5281 5282 /* Enable target mode */ 5283 nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4); 5284 5285 /* Disable ini mode, if requested */ 5286 if (!qla_ini_mode_enabled(vha)) 5287 nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5); 5288 5289 /* Disable Full Login after LIP */ 5290 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13); 5291 /* Enable initial LIP */ 5292 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9); 5293 /* Enable FC tapes support */ 5294 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12); 5295 /* Disable Full Login after LIP */ 5296 nv->host_p &= __constant_cpu_to_le32(~BIT_10); 5297 /* Enable target PRLI control */ 5298 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14); 5299 } else { 5300 if (ha->tgt.saved_set) { 5301 nv->exchange_count = ha->tgt.saved_exchange_count; 5302 nv->firmware_options_1 = 5303 ha->tgt.saved_firmware_options_1; 5304 nv->firmware_options_2 = 5305 ha->tgt.saved_firmware_options_2; 5306 nv->firmware_options_3 = 5307 ha->tgt.saved_firmware_options_3; 5308 } 5309 return; 5310 } 5311 5312 /* out-of-order frames reassembly */ 5313 nv->firmware_options_3 |= BIT_6|BIT_9; 5314 5315 if (ha->tgt.enable_class_2) { 5316 if (vha->flags.init_done) 5317 fc_host_supported_classes(vha->host) = 5318 FC_COS_CLASS2 | FC_COS_CLASS3; 5319 5320
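 /* fw options 2, BIT_8: Class 2 service enable, kept in sync with the FC_COS advertisement above */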
nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8); 5321 } else { 5322 if (vha->flags.init_done) 5323 fc_host_supported_classes(vha->host) = FC_COS_CLASS3; 5324 5325 nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8); 5326 } 5327 } 5328 5329 void 5330 qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha, 5331 struct init_cb_24xx *icb) 5332 { 5333 struct qla_hw_data *ha = vha->hw; 5334 5335 if (ha->tgt.node_name_set) { 5336 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); 5337 icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14); 5338 } 5339 } 5340 5341 void 5342 qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) 5343 { 5344 struct qla_hw_data *ha = vha->hw; 5345 5346 if (!QLA_TGT_MODE_ENABLED()) 5347 return; 5348 5349 if (qla_tgt_mode_enabled(vha)) { 5350 if (!ha->tgt.saved_set) { 5351 /* We save only once */ 5352 ha->tgt.saved_exchange_count = nv->exchange_count; 5353 ha->tgt.saved_firmware_options_1 = 5354 nv->firmware_options_1; 5355 ha->tgt.saved_firmware_options_2 = 5356 nv->firmware_options_2; 5357 ha->tgt.saved_firmware_options_3 = 5358 nv->firmware_options_3; 5359 ha->tgt.saved_set = 1; 5360 } 5361 5362 nv->exchange_count = __constant_cpu_to_le16(0xFFFF); 5363 5364 /* Enable target mode */ 5365 nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4); 5366 5367 /* Disable ini mode, if requested */ 5368 if (!qla_ini_mode_enabled(vha)) 5369 nv->firmware_options_1 |= 5370 __constant_cpu_to_le32(BIT_5); 5371 5372 /* Disable Full Login after LIP */ 5373 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13); 5374 /* Enable initial LIP */ 5375 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9); 5376 /* Enable FC tapes support */ 5377 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12); 5378 /* Disable Full Login after LIP */ 5379 nv->host_p &= __constant_cpu_to_le32(~BIT_10); 5380 /* Enable target PRLI control */ 5381 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14); 5382 } else { 5383 if (ha->tgt.saved_set) { 5384 nv->exchange_count = ha->tgt.saved_exchange_count; 5385 nv->firmware_options_1 = 5386 ha->tgt.saved_firmware_options_1; 5387 nv->firmware_options_2 = 5388 ha->tgt.saved_firmware_options_2; 5389 nv->firmware_options_3 = 5390 ha->tgt.saved_firmware_options_3; 5391 } 5392 return; 5393 } 5394 5395 /* out-of-order frames reassembly */ 5396 nv->firmware_options_3 |= BIT_6|BIT_9; 5397 5398 if (ha->tgt.enable_class_2) { 5399 if (vha->flags.init_done) 5400 fc_host_supported_classes(vha->host) = 5401 FC_COS_CLASS2 | FC_COS_CLASS3; 5402 5403 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8); 5404 } else { 5405 if (vha->flags.init_done) 5406 fc_host_supported_classes(vha->host) = FC_COS_CLASS3; 5407 5408 nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8); 5409 } 5410 } 5411 5412 void 5413 qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha, 5414 struct init_cb_81xx *icb) 5415 { 5416 struct qla_hw_data *ha = vha->hw; 5417 5418 if (!QLA_TGT_MODE_ENABLED()) 5419 return; 5420 5421 if (ha->tgt.node_name_set) { 5422 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); 5423 icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14); 5424 } 5425 } 5426 5427 void 5428 qlt_83xx_iospace_config(struct qla_hw_data *ha) 5429 { 5430 if (!QLA_TGT_MODE_ENABLED()) 5431 return; 5432 5433 ha->msix_count += 1; /* For ATIO Q */ 5434 } 5435 5436 int 5437 qlt_24xx_process_response_error(struct scsi_qla_host *vha, 5438 struct sts_entry_24xx *pkt) 5439 { 5440 switch (pkt->entry_type) { 5441 case ABTS_RECV_24XX: 5442 case 
ABTS_RESP_24XX: 5443 case CTIO_TYPE7: 5444 case NOTIFY_ACK_TYPE: 5445 case CTIO_CRC2: 5446 return 1; 5447 default: 5448 return 0; 5449 } 5450 } 5451 5452 void 5453 qlt_modify_vp_config(struct scsi_qla_host *vha, 5454 struct vp_config_entry_24xx *vpmod) 5455 { 5456 if (qla_tgt_mode_enabled(vha)) 5457 vpmod->options_idx1 &= ~BIT_5; 5458 /* Disable ini mode, if requested */ 5459 if (!qla_ini_mode_enabled(vha)) 5460 vpmod->options_idx1 &= ~BIT_4; 5461 } 5462 5463 void 5464 qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) 5465 { 5466 if (!QLA_TGT_MODE_ENABLED()) 5467 return; 5468 5469 if (ha->mqenable || IS_QLA83XX(ha)) { 5470 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in; 5471 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out; 5472 } else { 5473 ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in; 5474 ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out; 5475 } 5476 5477 mutex_init(&base_vha->vha_tgt.tgt_mutex); 5478 mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex); 5479 qlt_clear_mode(base_vha); 5480 } 5481 5482 irqreturn_t 5483 qla83xx_msix_atio_q(int irq, void *dev_id) 5484 { 5485 struct rsp_que *rsp; 5486 scsi_qla_host_t *vha; 5487 struct qla_hw_data *ha; 5488 unsigned long flags; 5489 5490 rsp = (struct rsp_que *) dev_id; 5491 ha = rsp->hw; 5492 vha = pci_get_drvdata(ha->pdev); 5493 5494 spin_lock_irqsave(&ha->hardware_lock, flags); 5495 5496 qlt_24xx_process_atio_queue(vha); 5497 qla24xx_process_response_queue(vha, rsp); 5498 5499 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5500 5501 return IRQ_HANDLED; 5502 } 5503 5504 int 5505 qlt_mem_alloc(struct qla_hw_data *ha) 5506 { 5507 if (!QLA_TGT_MODE_ENABLED()) 5508 return 0; 5509 5510 ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) * 5511 MAX_MULTI_ID_FABRIC, GFP_KERNEL); 5512 if (!ha->tgt.tgt_vp_map) 5513 return -ENOMEM; 5514 5515 ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev, 5516 (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp), 5517 &ha->tgt.atio_dma, GFP_KERNEL); 5518 if (!ha->tgt.atio_ring) { 5519 kfree(ha->tgt.tgt_vp_map); 5520 return -ENOMEM; 5521 } 5522 return 0; 5523 } 5524 5525 void 5526 qlt_mem_free(struct qla_hw_data *ha) 5527 { 5528 if (!QLA_TGT_MODE_ENABLED()) 5529 return; 5530 5531 if (ha->tgt.atio_ring) { 5532 dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) * 5533 sizeof(struct atio_from_isp), ha->tgt.atio_ring, 5534 ha->tgt.atio_dma); 5535 } 5536 kfree(ha->tgt.tgt_vp_map); 5537 } 5538 5539 /* vport_slock to be held by the caller */ 5540 void 5541 qlt_update_vp_map(struct scsi_qla_host *vha, int cmd) 5542 { 5543 if (!QLA_TGT_MODE_ENABLED()) 5544 return; 5545 5546 switch (cmd) { 5547 case SET_VP_IDX: 5548 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha; 5549 break; 5550 case SET_AL_PA: 5551 vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx; 5552 break; 5553 case RESET_VP_IDX: 5554 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL; 5555 break; 5556 case RESET_AL_PA: 5557 vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0; 5558 break; 5559 } 5560 } 5561 5562 static int __init qlt_parse_ini_mode(void) 5563 { 5564 if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0) 5565 ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE; 5566 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0) 5567 ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED; 5568 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0) 5569 ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED; 5570 else 5571 return false; 5572 5573 return 
true; 5574 } 5575 5576 int __init qlt_init(void) 5577 { 5578 int ret; 5579 5580 if (!qlt_parse_ini_mode()) { 5581 ql_log(ql_log_fatal, NULL, 0xe06b, 5582 "qlt_parse_ini_mode() failed\n"); 5583 return -EINVAL; 5584 } 5585 5586 if (!QLA_TGT_MODE_ENABLED()) 5587 return 0; 5588 5589 qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep", 5590 sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct 5591 qla_tgt_mgmt_cmd), 0, NULL); 5592 if (!qla_tgt_mgmt_cmd_cachep) { 5593 ql_log(ql_log_fatal, NULL, 0xe06d, 5594 "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n"); 5595 return -ENOMEM; 5596 } 5597 5598 qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab, 5599 mempool_free_slab, qla_tgt_mgmt_cmd_cachep); 5600 if (!qla_tgt_mgmt_cmd_mempool) { 5601 ql_log(ql_log_fatal, NULL, 0xe06e, 5602 "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n"); 5603 ret = -ENOMEM; 5604 goto out_mgmt_cmd_cachep; 5605 } 5606 5607 qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0); 5608 if (!qla_tgt_wq) { 5609 ql_log(ql_log_fatal, NULL, 0xe06f, 5610 "alloc_workqueue for qla_tgt_wq failed\n"); 5611 ret = -ENOMEM; 5612 goto out_cmd_mempool; 5613 } 5614 /* 5615 * Return 1 to signal that initiator-mode is being disabled 5616 */ 5617 return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0; 5618 5619 out_cmd_mempool: 5620 mempool_destroy(qla_tgt_mgmt_cmd_mempool); 5621 out_mgmt_cmd_cachep: 5622 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); 5623 return ret; 5624 } 5625 5626 void qlt_exit(void) 5627 { 5628 if (!QLA_TGT_MODE_ENABLED()) 5629 return; 5630 5631 destroy_workqueue(qla_tgt_wq); 5632 mempool_destroy(qla_tgt_mgmt_cmd_mempool); 5633 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); 5634 } 5635