/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "qla_def.h"
#include "qla_target.h"

static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled on enabling target mode and then on disabling target mode "
	"enabled back; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"enabled\" (default) - initiator mode will always stay enabled.");

int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};

/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE      0	/* simple task attribute */
#define FCP_PTA_HEADQ       1	/* head of queue task attribute */
#define FCP_PTA_ORDERED     2	/* ordered task attribute */
#define FCP_PTA_ACA         4	/* auto. contingent allegiance */
#define FCP_PTA_MASK        7	/* mask for task attribute field */
#define FCP_PRI_SHIFT       3	/* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK  0x80	/* reserved bits in priority field */
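/*
 * Illustrative decode of the shared task-attribute/priority byte using the
 * definitions above. This is a sketch for readers only -- the variable names
 * are hypothetical and nothing in this file uses them:
 *
 *	uint8_t attr_byte;	// task_attr byte from the FCP_CMND payload
 *	uint8_t task_attr = attr_byte & FCP_PTA_MASK;
 *	uint8_t priority  = (attr_byte & ~FCP_PRI_RESVD_MASK) >> FCP_PRI_SHIFT;
 */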
/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation at the time when
 * those functions are called:
 *
 *   - Either the context is IRQ and only the IRQ handler can modify HW data,
 *     including rings related fields,
 *
 *   - Or access to target mode variables from struct qla_tgt doesn't
 *     cross those functions' boundaries, except tgt_stop, which is
 *     additionally protected by irq_cmd_count.
 */

/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt);
static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
	*cmd, struct atio_from_isp *atio, int ha_locked);
static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
	struct qla_tgt_srr_imm *imm, int ha_lock);
/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_cmd_cachep;
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);

/* ha->hardware_lock is supposed to be held on entry (to protect tgt->sess_list) */
static struct qla_tgt_sess *qlt_find_sess_by_port_name(
	struct qla_tgt *tgt,
	const uint8_t *port_name)
{
	struct qla_tgt_sess *sess;

	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
		if (!memcmp(sess->port_name, port_name, WWN_SIZE))
			return sess;
	}

	return NULL;
}

/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);
		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}

static inline
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
	uint8_t *d_id)
{
	struct qla_hw_data *ha = vha->hw;
	uint8_t vp_idx;

	if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
		return NULL;

	if (vha->d_id.b.al_pa == d_id[2])
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}

static inline
struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
	uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}
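/*
 * Note on the two lookups above: ha->tgt.tgt_vp_map serves double duty.
 * qlt_find_host_by_d_id() first indexes it by the al_pa byte of the
 * destination ID to translate an FC address into a vp_idx, then (like
 * qlt_find_host_by_vp_idx()) indexes it by that vp_idx to resolve the
 * scsi_qla_host, after checking vp_idx_map to make sure the virtual port
 * is actually active.
 */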
void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id[0],
			    atio->u.isp24.fcp_hdr.d_id[1],
			    atio->u.isp24.fcp_hdr.d_id[2]);
			break;
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != 0xFFFF)) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received ATIO packet of unknown "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return;
}

void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		if (0xFF != entry->u.isp24.vp_index) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RESP_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	default:
		qlt_response_pkt(vha, pkt);
		break;
	}

}
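/*
 * Session teardown happens in two steps: qlt_unreg_sess() below unlinks the
 * session under ha->hardware_lock and schedules free_work, and
 * qlt_free_session_done() then runs in process context, where it can call
 * into the fabric module and finally drop tgt->sess_count, waking up anyone
 * waiting in qlt_stop_phase1().
 */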
static void qlt_free_session_done(struct work_struct *work)
{
	struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;

	BUG_ON(!tgt);
	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
	    "Unregistration of sess %p finished\n", sess);

	kfree(sess);
	/*
	 * We need to protect against a race, where tgt could be freed before
	 * or inside wake_up().
	 */
	tgt->sess_count--;
	if (tgt->sess_count == 0)
		wake_up_all(&tgt->waitQ);
}

/* ha->hardware_lock is supposed to be held on entry */
void qlt_unreg_sess(struct qla_tgt_sess *sess)
{
	struct scsi_qla_host *vha = sess->vha;

	vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	list_del(&sess->sess_list_entry);
	if (sess->deleted)
		list_del(&sess->del_list_entry);

	INIT_WORK(&sess->free_work, qlt_free_session_done);
	schedule_work(&sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);

/* ha->hardware_lock is supposed to be held on entry */
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	uint32_t unpacked_lun, lun = 0;
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
#if 0 /* FIXME: Re-enable Global event handling.. */
		/* Global event */
		atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
		qlt_clear_tgt_db(ha->tgt.qla_tgt, 1);
		if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
			sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
			    typeof(*sess), sess_list_entry);
			switch (mcmd) {
			case QLA_TGT_NEXUS_LOSS_SESS:
				mcmd = QLA_TGT_NEXUS_LOSS;
				break;
			case QLA_TGT_ABORT_ALL_SESS:
				mcmd = QLA_TGT_ABORT_ALL;
				break;
			case QLA_TGT_NEXUS_LOSS:
			case QLA_TGT_ABORT_ALL:
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe046,
				    "qla_target(%d): Not allowed "
				    "command %x in %s", vha->vp_idx,
				    mcmd, __func__);
				sess = NULL;
				break;
			}
		} else
			sess = NULL;
#endif
	} else {
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
	    mcmd, loop_id);

	lun = a->u.isp24.fcp_cmnd.lun;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
	    iocb, QLA24XX_MGMT_SEND_NACK);
}
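/*
 * Deleted sessions are not freed right away: qlt_schedule_sess_for_deletion()
 * below parks them on tgt->del_sess_list for roughly port_down_retry_count + 5
 * seconds (unless "immediate" is set), so an initiator that quickly re-logins
 * can be resurrected via qlt_undelete_sess() instead of paying the full
 * session teardown and setup cost.
 */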
/* ha->hardware_lock is supposed to be held on entry */
static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
	bool immediate)
{
	struct qla_tgt *tgt = sess->tgt;
	uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;

	if (sess->deleted)
		return;

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion\n", sess);
	list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
	sess->deleted = 1;

	if (immediate)
		dev_loss_tmo = 0;

	sess->expires = jiffies + dev_loss_tmo * HZ;

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
	    "qla_target(%d): session for port %8phC (loop ID %d) scheduled for "
	    "deletion in %u secs (expires: %lu) immed: %d\n",
	    sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo,
	    sess->expires, immediate);

	if (immediate)
		schedule_delayed_work(&tgt->sess_del_work, 0);
	else
		schedule_delayed_work(&tgt->sess_del_work,
		    sess->expires - jiffies);
}

/* ha->hardware_lock is supposed to be held on entry */
static void qlt_clear_tgt_db(struct qla_tgt *tgt, bool local_only)
{
	struct qla_tgt_sess *sess;

	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
		qlt_schedule_sess_for_deletion(sess, true);

	/* At this point tgt could be already dead */
}

static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list;
	char *id_iter;
	int res, rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA Alloc failed of %u\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		res = -1;
		goto out_free_id_list;
	}

	id_iter = (char *)gid_list;
	res = -1;
	for (i = 0; i < entries; i++) {
		struct gid_list_info *gid = (struct gid_list_info *)id_iter;
		if ((gid->al_pa == s_id[2]) &&
		    (gid->area == s_id[1]) &&
		    (gid->domain == s_id[0])) {
			*loop_id = le16_to_cpu(gid->loop_id);
			res = 0;
			break;
		}
		id_iter += ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}

/* ha->hardware_lock is supposed to be held on entry */
static void qlt_undelete_sess(struct qla_tgt_sess *sess)
{
	BUG_ON(!sess->deleted);

	list_del(&sess->del_list_entry);
	sess->deleted = 0;
}

static void qlt_del_sess_work_fn(struct delayed_work *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt,
	    sess_del_work);
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (!list_empty(&tgt->del_sess_list)) {
		sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
		    del_list_entry);
		if (time_after_eq(jiffies, sess->expires)) {
			qlt_undelete_sess(sess);

			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
			    "Timeout: sess %p about to be deleted\n",
			    sess);
			ha->tgt.tgt_ops->shutdown_sess(sess);
			ha->tgt.tgt_ops->put_sess(sess);
		} else {
			schedule_delayed_work(&tgt->sess_del_work,
			    sess->expires - jiffies);
			break;
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
/*
 * Adds an extra ref to allow dropping the hw lock after adding sess to the
 * list. Caller must put it.
 */
static struct qla_tgt_sess *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags;
	unsigned char be_sid[3];

	/* Check to avoid double sessions */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	list_for_each_entry(sess, &ha->tgt.qla_tgt->sess_list,
	    sess_list_entry) {
		if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
			    "Double sess %p found (s_id %x:%x:%x, "
			    "loop_id %d), updating to d_id %x:%x:%x, "
			    "loop_id %d", sess, sess->s_id.b.domain,
			    sess->s_id.b.al_pa, sess->s_id.b.area,
			    sess->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.al_pa, fcport->d_id.b.area,
			    fcport->loop_id);

			if (sess->deleted)
				qlt_undelete_sess(sess);

			kref_get(&sess->se_sess->sess_kref);
			ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
			    (fcport->flags & FCF_CONF_COMP_SUPPORTED));

			if (sess->local && !local)
				sess->local = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

			return sess;
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
		    "qla_target(%u): session allocation failed, all commands "
		    "from port %8phC will be refused", vha->vp_idx,
		    fcport->port_name);

		return NULL;
	}
	sess->tgt = ha->tgt.qla_tgt;
	sess->vha = vha;
	sess->s_id = fcport->d_id;
	sess->loop_id = fcport->loop_id;
	sess->local = local;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
	    sess, ha->tgt.qla_tgt);

	be_sid[0] = sess->s_id.b.domain;
	be_sid[1] = sess->s_id.b.area;
	be_sid[2] = sess->s_id.b.al_pa;
	/*
	 * Determine if this fc_port->port_name is allowed to access
	 * target mode using explicit NodeACLs+MappedLUNs, or using
	 * TPG demo mode. If this is successful a target mode FC nexus
	 * is created.
	 */
	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
		kfree(sess);
		return NULL;
	}
	/*
	 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
	 * access across ->hardware_lock reacquire.
	 */
	kref_get(&sess->se_sess->sess_kref);

	sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
	BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
	memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));

	spin_lock_irqsave(&ha->hardware_lock, flags);
	list_add_tail(&sess->sess_list_entry, &ha->tgt.qla_tgt->sess_list);
	ha->tgt.qla_tgt->sess_count++;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name,
	    fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area,
	    sess->s_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");

	return sess;
}
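/*
 * Reference counting contract: qlt_create_sess() returns with an extra
 * reference on se_sess->sess_kref taken on behalf of the caller, so the
 * session can safely be used across a ->hardware_lock drop/reacquire.
 * The callers below balance it with ha->tgt.tgt_ops->put_sess().
 */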
"" : "not "); 674 675 return sess; 676 } 677 678 /* 679 * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port() 680 */ 681 void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) 682 { 683 struct qla_hw_data *ha = vha->hw; 684 struct qla_tgt *tgt = ha->tgt.qla_tgt; 685 struct qla_tgt_sess *sess; 686 unsigned long flags; 687 688 if (!vha->hw->tgt.tgt_ops) 689 return; 690 691 if (!tgt || (fcport->port_type != FCT_INITIATOR)) 692 return; 693 694 spin_lock_irqsave(&ha->hardware_lock, flags); 695 if (tgt->tgt_stop) { 696 spin_unlock_irqrestore(&ha->hardware_lock, flags); 697 return; 698 } 699 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); 700 if (!sess) { 701 spin_unlock_irqrestore(&ha->hardware_lock, flags); 702 703 mutex_lock(&ha->tgt.tgt_mutex); 704 sess = qlt_create_sess(vha, fcport, false); 705 mutex_unlock(&ha->tgt.tgt_mutex); 706 707 spin_lock_irqsave(&ha->hardware_lock, flags); 708 } else { 709 kref_get(&sess->se_sess->sess_kref); 710 711 if (sess->deleted) { 712 qlt_undelete_sess(sess); 713 714 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c, 715 "qla_target(%u): %ssession for port %8phC " 716 "(loop ID %d) reappeared\n", vha->vp_idx, 717 sess->local ? "local " : "", sess->port_name, 718 sess->loop_id); 719 720 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007, 721 "Reappeared sess %p\n", sess); 722 } 723 ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, 724 (fcport->flags & FCF_CONF_COMP_SUPPORTED)); 725 } 726 727 if (sess && sess->local) { 728 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d, 729 "qla_target(%u): local session for " 730 "port %8phC (loop ID %d) became global\n", vha->vp_idx, 731 fcport->port_name, sess->loop_id); 732 sess->local = 0; 733 } 734 ha->tgt.tgt_ops->put_sess(sess); 735 spin_unlock_irqrestore(&ha->hardware_lock, flags); 736 } 737 738 void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport) 739 { 740 struct qla_hw_data *ha = vha->hw; 741 struct qla_tgt *tgt = ha->tgt.qla_tgt; 742 struct qla_tgt_sess *sess; 743 unsigned long flags; 744 745 if (!vha->hw->tgt.tgt_ops) 746 return; 747 748 if (!tgt || (fcport->port_type != FCT_INITIATOR)) 749 return; 750 751 spin_lock_irqsave(&ha->hardware_lock, flags); 752 if (tgt->tgt_stop) { 753 spin_unlock_irqrestore(&ha->hardware_lock, flags); 754 return; 755 } 756 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); 757 if (!sess) { 758 spin_unlock_irqrestore(&ha->hardware_lock, flags); 759 return; 760 } 761 762 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess); 763 764 sess->local = 1; 765 qlt_schedule_sess_for_deletion(sess, false); 766 spin_unlock_irqrestore(&ha->hardware_lock, flags); 767 } 768 769 static inline int test_tgt_sess_count(struct qla_tgt *tgt) 770 { 771 struct qla_hw_data *ha = tgt->ha; 772 unsigned long flags; 773 int res; 774 /* 775 * We need to protect against race, when tgt is freed before or 776 * inside wake_up() 777 */ 778 spin_lock_irqsave(&ha->hardware_lock, flags); 779 ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002, 780 "tgt %p, empty(sess_list)=%d sess_count=%d\n", 781 tgt, list_empty(&tgt->sess_list), tgt->sess_count); 782 res = (tgt->sess_count == 0); 783 spin_unlock_irqrestore(&ha->hardware_lock, flags); 784 785 return res; 786 } 787 788 /* Called by tcm_qla2xxx configfs code */ 789 void qlt_stop_phase1(struct qla_tgt *tgt) 790 { 791 struct scsi_qla_host *vha = tgt->vha; 792 struct qla_hw_data *ha = tgt->ha; 793 unsigned long flags; 794 795 if (tgt->tgt_stop || tgt->tgt_stopped) { 796 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e, 797 "Already in 
/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		dump_stack();
		return;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
	mutex_lock(&ha->tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt, true);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&ha->tgt.tgt_mutex);

	flush_delayed_work(&tgt->sess_del_work);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: list_empty(sess_list)=%d "
	    "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
	    tgt->sess_count);

	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));

	/* Big hammer */
	if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
}
EXPORT_SYMBOL(qlt_stop_phase1);

/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00b,
	    "Waiting for %d IRQ commands to complete (tgt %p)",
	    tgt->irq_cmd_count, tgt);

	mutex_lock(&ha->tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (tgt->irq_cmd_count != 0) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		udelay(2);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&ha->tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00c, "Stop of tgt %p finished",
	    tgt);
}
EXPORT_SYMBOL(qlt_stop_phase2);

/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;

	if ((ha->tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	ha->tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}
/* ha->hardware_lock is supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;

	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}

/*
 * ha->hardware_lock is supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_send_notify_ack(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (ha->tgt.qla_tgt != NULL)
		ha->tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
		    __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	qla2x00_start_iocbs(vha, vha->req);
}
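/*
 * Note for the ABTS response builder below: f_ctl is a 3-byte field in the
 * FC header. The code composes it as a 32-bit little-endian value and then
 * copies the three low-order bytes into fcp_hdr_le.f_ctl[] one byte at a
 * time through the byte pointer p.
 */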
/*
 * ha->hardware_lock is supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x)\n",
	    ha, abts, status);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
	} else {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
		    BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	ha->tgt.qla_tgt->abts_resp_expected++;

	qla2x00_start_iocbs(vha, vha->req);
}
/*
 * ha->hardware_lock is supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct abts_resp_from_24xx_fw *entry)
{
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	/*
	 * On entry, this is the firmware's response to the ABTS response that
	 * we generated, so the ID fields in it are reversed.
	 */

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
	ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
	ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	ctio->u.status1.flags =
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_TERMINATE);
	ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;

	qla2x00_start_iocbs(vha, vha->req);

	qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
	    FCP_TMF_CMPL, true);
}

/* ha->hardware_lock is supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_session *se_sess = sess->se_sess;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct se_cmd *se_cmd;
	u32 lun = 0;
	int rc;
	bool found_lun = false;

	spin_lock(&se_sess->sess_cmd_lock);
	list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
		struct qla_tgt_cmd *cmd =
		    container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
		if (cmd->tag == abts->exchange_addr_to_abort) {
			lun = cmd->unpacked_lun;
			found_lun = true;
			break;
		}
	}
	spin_unlock(&se_sess->sess_cmd_lock);

	if (!found_lun)
		return -ENOENT;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
	    "qla_target(%d): task abort (tag=%d)\n",
	    vha->vp_idx, abts->exchange_addr_to_abort);

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
	    abts->exchange_addr_to_abort);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
		    "qla_target(%d): tgt_ops->handle_tmr()"
		    " failed: %d", vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}
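/*
 * The handler below gets the initiator's address from fcp_hdr_le, where the
 * s_id bytes are stored in little-endian order, so they are reversed into
 * the wire (big-endian) order that find_sess_by_s_id() expects.
 */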
/*
 * ha->hardware_lock is supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	uint32_t tag = abts->exchange_addr_to_abort;
	uint8_t s_id[3];
	int rc;

	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
		    "qla_target(%d): ABTS: Abort Sequence not "
		    "supported\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
		    "qla_target(%d): ABTS: Unknown Exchange "
		    "Address received\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
	    abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
	    le32_to_cpu(abts->fcp_hdr_le.parameter));

	s_id[0] = abts->fcp_hdr_le.s_id[2];
	s_id[1] = abts->fcp_hdr_le.s_id[1];
	s_id[2] = abts->fcp_hdr_le.s_id[0];

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
		    "qla_target(%d): task abort for non-existent session\n",
		    vha->vp_idx);
		rc = qlt_sched_sess_work(ha->tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
		if (rc != 0) {
			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
			    false);
		}
		return;
	}

	rc = __qlt_24xx_handle_abts(vha, abts, sess);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
		    vha->vp_idx, rc);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}
}
/*
 * ha->hardware_lock is supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, ha, 0xe008,
	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x)\n",
	    ha, atio, resp_code);

	/* Send marker if required */
	if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, ha, 0xe04c,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", ha->vp_idx, __func__);
		return;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->nport_handle = mcmd->sess->loop_id;
	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = ha->vp_idx;
	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
		CTIO7_FLAGS_SEND_STATUS);
	ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.scsi_status =
	    __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
	ctio->u.status1.response_len = __constant_cpu_to_le16(8);
	ctio->u.status1.sense_data[0] = resp_code;

	qla2x00_start_iocbs(ha, ha->req);
}

void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);

/* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
	    "TM response mcmd (%p) status %#x state %#x",
	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
		qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
		    0, 0, 0, 0, 0, 0);
	else {
		if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
			qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
			    mcmd->fc_tm_rsp, false);
		else
			qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
			    mcmd->fc_tm_rsp);
	}
	/*
	 * Make the callback for ->free_mcmd() to queue_work() and invoke
	 * target_put_sess_cmd() to drop cmd_kref to 1. The final
	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
	 * qlt_xmit_tm_rsp() returns here..
	 */
	ha->tgt.tgt_ops->free_mcmd(mcmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);
/* No locks */
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd = prm->cmd;

	BUG_ON(cmd->sg_cnt == 0);

	prm->sg = (struct scatterlist *)cmd->sg;
	prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
	    cmd->sg_cnt, cmd->dma_data_direction);
	if (unlikely(prm->seg_cnt == 0))
		goto out_err;

	prm->cmd->sg_mapped = 1;

	/*
	 * If the command needs more data segments than fit into the command
	 * IOCB, we need to allocate continuation entries.
	 */
	if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
		prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
		    prm->tgt->datasegs_per_cmd, prm->tgt->datasegs_per_cont);

	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
	    prm->seg_cnt, prm->req_cnt);
	return 0;

out_err:
	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
	    0, prm->cmd->sg_cnt);
	return -1;
}

static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha = vha->hw;

	BUG_ON(!cmd->sg_mapped);
	pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
	cmd->sg_mapped = 0;
}

static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
	uint32_t req_cnt)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ha->iobase;
	uint32_t cnt;

	if (vha->req->cnt < (req_cnt + 2)) {
		cnt = (uint16_t)RD_REG_DWORD(&reg->isp24.req_q_out);

		ql_dbg(ql_dbg_tgt, vha, 0xe00a,
		    "Request ring circled: cnt=%d, vha->req->ring_index=%d, "
		    "vha->req->cnt=%d, req_cnt=%d\n", cnt,
		    vha->req->ring_index, vha->req->cnt, req_cnt);
		if (vha->req->ring_index < cnt)
			vha->req->cnt = cnt - vha->req->ring_index;
		else
			vha->req->cnt = vha->req->length -
			    (vha->req->ring_index - cnt);
	}

	if (unlikely(vha->req->cnt < (req_cnt + 2))) {
		ql_dbg(ql_dbg_tgt, vha, 0xe00b,
		    "qla_target(%d): There is no room in the "
		    "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, "
		    "req_cnt=%d\n", vha->vp_idx, vha->req->ring_index,
		    vha->req->cnt, req_cnt);
		return -EAGAIN;
	}
	vha->req->cnt -= req_cnt;

	return 0;
}
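/*
 * Worked example for the ring accounting above (numbers are hypothetical):
 * with a request ring of length 1024, ring_index == 1000 and a hardware out
 * pointer cnt == 100, the ring has wrapped, so the free space is
 * 1024 - (1000 - 100) = 124 entries. The "req_cnt + 2" check keeps a couple
 * of entries of slack between producer and consumer.
 */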
/*
 * ha->hardware_lock is supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
{
	/* Adjust ring index. */
	vha->req->ring_index++;
	if (vha->req->ring_index == vha->req->length) {
		vha->req->ring_index = 0;
		vha->req->ring_ptr = vha->req->ring;
	} else {
		vha->req->ring_ptr++;
	}
	return (cont_entry_t *)vha->req->ring_ptr;
}

/* ha->hardware_lock is supposed to be held on entry */
static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t h;

	h = ha->tgt.current_handle;
	/* always increment cmd handle */
	do {
		++h;
		if (h > DEFAULT_OUTSTANDING_COMMANDS)
			h = 1;	/* 0 is QLA_TGT_NULL_HANDLE */
		if (h == ha->tgt.current_handle) {
			ql_dbg(ql_dbg_tgt, vha, 0xe04e,
			    "qla_target(%d): Ran out of "
			    "empty cmd slots in ha %p\n", vha->vp_idx, ha);
			h = QLA_TGT_NULL_HANDLE;
			break;
		}
	} while ((h == QLA_TGT_NULL_HANDLE) ||
	    (h == QLA_TGT_SKIP_HANDLE) ||
	    (ha->tgt.cmds[h-1] != NULL));

	if (h != QLA_TGT_NULL_HANDLE)
		ha->tgt.current_handle = h;

	return h;
}

/* ha->hardware_lock is supposed to be held on entry */
static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	uint32_t h;
	struct ctio7_to_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *atio = &prm->cmd->atio;

	pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	pkt->entry_type = CTIO_TYPE7;
	pkt->entry_count = (uint8_t)prm->req_cnt;
	pkt->vp_index = vha->vp_idx;

	h = qlt_make_handle(vha);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, therefore, the command.
		 */
		return -EAGAIN;
	} else
		ha->tgt.cmds[h-1] = prm->cmd;

	pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = prm->cmd->loop_id;
	pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr = atio->u.isp24.exchange_addr;
	pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
	pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

	ql_dbg(ql_dbg_tgt, vha, 0xe00c,
	    "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
	    vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT,
	    le16_to_cpu(pkt->u.status0.ox_id));
	return 0;
}
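/*
 * Handle scheme used above: qlt_make_handle() hands out values in
 * [1, DEFAULT_OUTSTANDING_COMMANDS], with 0 reserved as QLA_TGT_NULL_HANDLE;
 * handle h maps to ha->tgt.cmds[h - 1]. The CTIO_COMPLETION_HANDLE_MARK bit
 * tags the handle written into the CTIO packet.
 */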
/*
 * ha->hardware_lock is supposed to be held on entry. We have already made
 * sure that there is a sufficient number of request entries to not drop it.
 */
static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;

	/* Build continuation packets */
	while (prm->seg_cnt > 0) {
		cont_a64_entry_t *cont_pkt64 =
		    (cont_a64_entry_t *)qlt_get_req_pkt(vha);

		/*
		 * Make sure that none of cont_pkt64's 64-bit specific fields
		 * are used for 32-bit addressing; cast to (cont_entry_t *)
		 * for that.
		 */

		memset(cont_pkt64, 0, sizeof(*cont_pkt64));

		cont_pkt64->entry_count = 1;
		cont_pkt64->sys_define = 0;

		if (enable_64bit_addressing) {
			cont_pkt64->entry_type = CONTINUE_A64_TYPE;
			dword_ptr =
			    (uint32_t *)&cont_pkt64->dseg_0_address;
		} else {
			cont_pkt64->entry_type = CONTINUE_TYPE;
			dword_ptr =
			    (uint32_t *)&((cont_entry_t *)
				cont_pkt64)->dseg_0_address;
		}

		/* Load continuation entry data segments */
		for (cnt = 0;
		    cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
		    cnt++, prm->seg_cnt--) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_lo32
				(sg_dma_address(prm->sg)));
			if (enable_64bit_addressing) {
				*dword_ptr++ =
				    cpu_to_le32(pci_dma_hi32
					(sg_dma_address
					(prm->sg)));
			}
			*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

			ql_dbg(ql_dbg_tgt, vha, 0xe00d,
			    "S/G Segment Cont. phys_addr=%llx:%llx, len=%d\n",
			    (long long unsigned int)
			    pci_dma_hi32(sg_dma_address(prm->sg)),
			    (long long unsigned int)
			    pci_dma_lo32(sg_dma_address(prm->sg)),
			    (int)sg_dma_len(prm->sg));

			prm->sg = sg_next(prm->sg);
		}
	}
}

/*
 * ha->hardware_lock is supposed to be held on entry. We have already made
 * sure that there is a sufficient number of request entries to not drop it.
 */
static void qlt_load_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;

	ql_dbg(ql_dbg_tgt, vha, 0xe00e,
	    "iocb->scsi_status=%x, iocb->flags=%x\n",
	    le16_to_cpu(pkt24->u.status0.scsi_status),
	    le16_to_cpu(pkt24->u.status0.flags));

	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);

	/* Setup packet address segment pointer */
	dword_ptr = pkt24->u.status0.dseg_0_address;

	/* Set total data segment count */
	if (prm->seg_cnt)
		pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);

	if (prm->seg_cnt == 0) {
		/* No data transfer */
		*dword_ptr++ = 0;
		*dword_ptr = 0;
		return;
	}

	/* If scatter gather */
	ql_dbg(ql_dbg_tgt, vha, 0xe00f, "%s", "Building S/G data segments...");

	/* Load command entry data segments */
	for (cnt = 0;
	    (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
	    cnt++, prm->seg_cnt--) {
		*dword_ptr++ =
		    cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
		if (enable_64bit_addressing) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_hi32(
				sg_dma_address(prm->sg)));
		}
		*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

		ql_dbg(ql_dbg_tgt, vha, 0xe010,
		    "S/G Segment phys_addr=%llx:%llx, len=%d\n",
		    (long long unsigned int)pci_dma_hi32(sg_dma_address(
			prm->sg)),
		    (long long unsigned int)pci_dma_lo32(sg_dma_address(
			prm->sg)),
		    (int)sg_dma_len(prm->sg));

		prm->sg = sg_next(prm->sg);
	}

	qlt_load_cont_data_segments(prm, vha);
}

static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
{
	return cmd->bufflen > 0;
}
/*
 * Called without ha->hardware_lock held
 */
static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
	struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
	uint32_t *full_req_cnt)
{
	struct qla_tgt *tgt = cmd->tgt;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd = &cmd->se_cmd;

	if (unlikely(cmd->aborted)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
		    "qla_target(%d): terminating exchange "
		    "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd,
		    se_cmd, cmd->tag);

		cmd->state = QLA_TGT_STATE_ABORTED;

		qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);

		/* !! At this point cmd could be already freed !! */
		return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u\n",
	    vha->vp_idx, cmd->tag);

	prm->cmd = cmd;
	prm->tgt = tgt;
	prm->rq_result = scsi_status;
	prm->sense_buffer = &cmd->sense_buffer[0];
	prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
	prm->sg = NULL;
	prm->seg_cnt = -1;
	prm->req_cnt = 1;
	prm->add_status_pkt = 0;

	ql_dbg(ql_dbg_tgt, vha, 0xe012, "rq_result=%x, xmit_type=%x\n",
	    prm->rq_result, xmit_type);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
		return -EFAULT;

	ql_dbg(ql_dbg_tgt, vha, 0xe013, "CTIO start: vha(%d)\n", vha->vp_idx);

	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
		if (qlt_pci_map_calc_cnt(prm) != 0)
			return -EAGAIN;
	}

	*full_req_cnt = prm->req_cnt;

	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_tgt, vha, 0xe014,
		    "Residual underflow: %d (tag %d, "
		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_UNDER;
	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_tgt, vha, 0xe015,
		    "Residual overflow: %d (tag %d, "
		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_OVER;
	}

	if (xmit_type & QLA_TGT_XMIT_STATUS) {
		/*
		 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
		 * ignored in *xmit_response() below
		 */
		if (qlt_has_data(cmd)) {
			if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
			    (IS_FWI2_CAPABLE(ha) &&
			    (prm->rq_result != 0))) {
				prm->add_status_pkt = 1;
				(*full_req_cnt)++;
			}
		}
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe016,
	    "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n",
	    prm->req_cnt, *full_req_cnt, prm->add_status_pkt);

	return 0;
}
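/*
 * Explicit confirmation (below) is only requested when the transport setup
 * makes it meaningful: it is skipped entirely when class 2 service is
 * enabled, and otherwise requires that the initiator advertised
 * confirmed-completion support (conf_compl_supported); sending plain status
 * additionally requires enable_explicit_conf.
 */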
static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
	struct qla_tgt_cmd *cmd, int sending_sense)
{
	if (ha->tgt.enable_class_2)
		return 0;

	if (sending_sense)
		return cmd->conf_compl_supported;
	else
		return ha->tgt.enable_explicit_conf &&
		    cmd->conf_compl_supported;
}

#ifdef CONFIG_QLA_TGT_DEBUG_SRR
/*
 * Original taken from the XFS code
 */
static unsigned long qlt_srr_random(void)
{
	static int Inited;
	static unsigned long RandomValue;
	static DEFINE_SPINLOCK(lock);
	/* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
	register long rv;
	register long lo;
	register long hi;
	unsigned long flags;

	spin_lock_irqsave(&lock, flags);
	if (!Inited) {
		RandomValue = jiffies;
		Inited = 1;
	}
	rv = RandomValue;
	hi = rv / 127773;
	lo = rv % 127773;
	rv = 16807 * lo - 2836 * hi;
	if (rv <= 0)
		rv += 2147483647;
	RandomValue = rv;
	spin_unlock_irqrestore(&lock, flags);
	return rv;
}
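/*
 * The generator above is the classic Park-Miller "minimal standard" PRNG:
 * x' = 16807 * x mod (2^31 - 1), implemented with the Schrage factorization
 * (127773 = m / a, 2836 = m % a) so the multiplication never overflows a
 * 32-bit long.
 */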
static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
{
#if 0 /* This does not simulate a real lost status packet, so it won't lead to SRR */
	if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
	    == 50) {
		*xmit_type &= ~QLA_TGT_XMIT_STATUS;
		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
		    "Dropping cmd %p (tag %d) status", cmd, cmd->tag);
	}
#endif
	/*
	 * It's currently not possible to simulate SRRs for FCP_WRITE without
	 * a physical link layer failure, so don't even try here..
	 */
	if (cmd->dma_data_direction != DMA_FROM_DEVICE)
		return;

	if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
	    ((qlt_srr_random() % 100) == 20)) {
		int i, leave = 0;
		unsigned int tot_len = 0;

		while (leave == 0)
			leave = qlt_srr_random() % cmd->sg_cnt;

		for (i = 0; i < leave; i++)
			tot_len += cmd->sg[i].length;

		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
		    "Cutting cmd %p (tag %d) buffer"
		    " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
		    " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave,
		    cmd->bufflen, cmd->sg_cnt);

		cmd->bufflen = tot_len;
		cmd->sg_cnt = leave;
	}

	if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
		unsigned int offset = qlt_srr_random() % cmd->bufflen;

		ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
		    "Cutting cmd %p (tag %d) buffer head "
		    "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset,
		    cmd->bufflen);
		if (offset == 0)
			*xmit_type &= ~QLA_TGT_XMIT_DATA;
		else if (qlt_set_data_offset(cmd, offset)) {
			ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
			    "qlt_set_data_offset() failed (tag %d)", cmd->tag);
		}
	}
}
#else
static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
{}
#endif

static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
	struct qla_tgt_prm *prm)
{
	prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
	    (uint32_t)sizeof(ctio->u.status1.sense_data));
	ctio->u.status0.flags |=
	    __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
	if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
		ctio->u.status0.flags |= __constant_cpu_to_le16(
		    CTIO7_FLAGS_EXPLICIT_CONFORM |
		    CTIO7_FLAGS_CONFORM_REQ);
	}
	ctio->u.status0.residual = cpu_to_le32(prm->residual);
	ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
	if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
		int i;

		if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
			if (prm->cmd->se_cmd.scsi_status != 0) {
				ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
				    "Skipping EXPLICIT_CONFORM and "
				    "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
				    "non GOOD status\n");
				goto skip_explicit_conf;
			}
			ctio->u.status1.flags |= __constant_cpu_to_le16(
			    CTIO7_FLAGS_EXPLICIT_CONFORM |
			    CTIO7_FLAGS_CONFORM_REQ);
		}
skip_explicit_conf:
		ctio->u.status1.flags &=
		    ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.scsi_status |=
		    __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
		ctio->u.status1.sense_length =
		    cpu_to_le16(prm->sense_buffer_len);
		for (i = 0; i < prm->sense_buffer_len/4; i++)
			((uint32_t *)ctio->u.status1.sense_data)[i] =
			    cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
#if 0
		if (unlikely((prm->sense_buffer_len % 4) != 0)) {
			static int q;
			if (q < 10) {
				ql_dbg(ql_dbg_tgt, vha, 0xe04f,
				    "qla_target(%d): %d bytes of sense "
				    "lost", prm->tgt->ha->vp_idx,
				    prm->sense_buffer_len % 4);
				q++;
			}
		}
#endif
	} else {
		ctio->u.status1.flags &=
		    ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
		ctio->u.status1.flags |=
		    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
		ctio->u.status1.sense_length = 0;
		memset(ctio->u.status1.sense_data, 0,
		    sizeof(ctio->u.status1.sense_data));
	}

	/* Sense with len > 24, is it possible ??? */
}
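/*
 * Note on the sense copy above: sense data is copied into the CTIO a 32-bit
 * word at a time through cpu_to_be32(), since the FCP response payload is
 * big-endian on the wire. As the disabled diagnostic block hints, any
 * trailing (sense_buffer_len % 4) bytes are not copied.
 */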
		    sizeof(ctio->u.status1.sense_data));
	}

	/* Sense with len > 24 -- is that possible? */
}

/*
 * Callback to set up a response with xmit_type QLA_TGT_XMIT_DATA and/or
 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
 */
int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
	uint8_t scsi_status)
{
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct ctio7_to_24xx *pkt;
	struct qla_tgt_prm prm;
	uint32_t full_req_cnt = 0;
	unsigned long flags = 0;
	int res;

	memset(&prm, 0, sizeof(prm));
	qlt_check_srr_debug(cmd, &xmit_type);

	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
	    "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, "
	    "cmd->dma_data_direction=%d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ?
	    1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction);

	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
	    &full_req_cnt);
	if (unlikely(res != 0)) {
		if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
			return 0;

		return res;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Does the f/w have enough free IOCBs for this request? */
	res = qlt_check_reserve_free_req(vha, full_req_cnt);
	if (unlikely(res))
		goto out_unmap_unlock;

	res = qlt_24xx_build_ctio_pkt(&prm, vha);
	if (unlikely(res != 0))
		goto out_unmap_unlock;

	pkt = (struct ctio7_to_24xx *)prm.pkt;

	if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
		pkt->u.status0.flags |=
		    __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
			CTIO7_FLAGS_STATUS_MODE_0);

		qlt_load_data_segments(&prm, vha);

		if (prm.add_status_pkt == 0) {
			if (xmit_type & QLA_TGT_XMIT_STATUS) {
				pkt->u.status0.scsi_status =
				    cpu_to_le16(prm.rq_result);
				pkt->u.status0.residual =
				    cpu_to_le32(prm.residual);
				pkt->u.status0.flags |= __constant_cpu_to_le16(
				    CTIO7_FLAGS_SEND_STATUS);
				if (qlt_need_explicit_conf(ha, cmd, 0)) {
					pkt->u.status0.flags |=
					    __constant_cpu_to_le16(
						CTIO7_FLAGS_EXPLICIT_CONFORM |
						CTIO7_FLAGS_CONFORM_REQ);
				}
			}

		} else {
			/*
			 * We have already made sure that there are enough
			 * request entries, so the HW lock will not be dropped
			 * in req_pkt().
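			 * (full_req_cnt entries were reserved earlier via
			 * qlt_check_reserve_free_req().)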
1957 */ 1958 struct ctio7_to_24xx *ctio = 1959 (struct ctio7_to_24xx *)qlt_get_req_pkt(vha); 1960 1961 ql_dbg(ql_dbg_tgt, vha, 0xe019, 1962 "Building additional status packet\n"); 1963 1964 memcpy(ctio, pkt, sizeof(*ctio)); 1965 ctio->entry_count = 1; 1966 ctio->dseg_count = 0; 1967 ctio->u.status1.flags &= ~__constant_cpu_to_le16( 1968 CTIO7_FLAGS_DATA_IN); 1969 1970 /* Real finish is ctio_m1's finish */ 1971 pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK; 1972 pkt->u.status0.flags |= __constant_cpu_to_le16( 1973 CTIO7_FLAGS_DONT_RET_CTIO); 1974 qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio, 1975 &prm); 1976 pr_debug("Status CTIO7: %p\n", ctio); 1977 } 1978 } else 1979 qlt_24xx_init_ctio_to_isp(pkt, &prm); 1980 1981 1982 cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */ 1983 1984 ql_dbg(ql_dbg_tgt, vha, 0xe01a, 1985 "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n", 1986 pkt, scsi_status); 1987 1988 qla2x00_start_iocbs(vha, vha->req); 1989 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1990 1991 return 0; 1992 1993 out_unmap_unlock: 1994 if (cmd->sg_mapped) 1995 qlt_unmap_sg(vha, cmd); 1996 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1997 1998 return res; 1999 } 2000 EXPORT_SYMBOL(qlt_xmit_response); 2001 2002 int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) 2003 { 2004 struct ctio7_to_24xx *pkt; 2005 struct scsi_qla_host *vha = cmd->vha; 2006 struct qla_hw_data *ha = vha->hw; 2007 struct qla_tgt *tgt = cmd->tgt; 2008 struct qla_tgt_prm prm; 2009 unsigned long flags; 2010 int res = 0; 2011 2012 memset(&prm, 0, sizeof(prm)); 2013 prm.cmd = cmd; 2014 prm.tgt = tgt; 2015 prm.sg = NULL; 2016 prm.req_cnt = 1; 2017 2018 /* Send marker if required */ 2019 if (qlt_issue_marker(vha, 0) != QLA_SUCCESS) 2020 return -EIO; 2021 2022 ql_dbg(ql_dbg_tgt, vha, 0xe01b, "CTIO_start: vha(%d)", 2023 (int)vha->vp_idx); 2024 2025 /* Calculate number of entries and segments required */ 2026 if (qlt_pci_map_calc_cnt(&prm) != 0) 2027 return -EAGAIN; 2028 2029 spin_lock_irqsave(&ha->hardware_lock, flags); 2030 2031 /* Does F/W have an IOCBs for this request */ 2032 res = qlt_check_reserve_free_req(vha, prm.req_cnt); 2033 if (res != 0) 2034 goto out_unlock_free_unmap; 2035 2036 res = qlt_24xx_build_ctio_pkt(&prm, vha); 2037 if (unlikely(res != 0)) 2038 goto out_unlock_free_unmap; 2039 pkt = (struct ctio7_to_24xx *)prm.pkt; 2040 pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT | 2041 CTIO7_FLAGS_STATUS_MODE_0); 2042 qlt_load_data_segments(&prm, vha); 2043 2044 cmd->state = QLA_TGT_STATE_NEED_DATA; 2045 2046 qla2x00_start_iocbs(vha, vha->req); 2047 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2048 2049 return res; 2050 2051 out_unlock_free_unmap: 2052 if (cmd->sg_mapped) 2053 qlt_unmap_sg(vha, cmd); 2054 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2055 2056 return res; 2057 } 2058 EXPORT_SYMBOL(qlt_rdy_to_xfer); 2059 2060 /* If hardware_lock held on entry, might drop it, then reaquire */ 2061 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ 2062 static int __qlt_send_term_exchange(struct scsi_qla_host *vha, 2063 struct qla_tgt_cmd *cmd, 2064 struct atio_from_isp *atio) 2065 { 2066 struct ctio7_to_24xx *ctio24; 2067 struct qla_hw_data *ha = vha->hw; 2068 request_t *pkt; 2069 int ret = 0; 2070 2071 ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha); 2072 2073 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); 2074 if (pkt == NULL) { 2075 ql_dbg(ql_dbg_tgt, vha, 0xe050, 2076 "qla_target(%d): %s 
failed: unable to allocate " 2077 "request packet\n", vha->vp_idx, __func__); 2078 return -ENOMEM; 2079 } 2080 2081 if (cmd != NULL) { 2082 if (cmd->state < QLA_TGT_STATE_PROCESSED) { 2083 ql_dbg(ql_dbg_tgt, vha, 0xe051, 2084 "qla_target(%d): Terminating cmd %p with " 2085 "incorrect state %d\n", vha->vp_idx, cmd, 2086 cmd->state); 2087 } else 2088 ret = 1; 2089 } 2090 2091 pkt->entry_count = 1; 2092 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 2093 2094 ctio24 = (struct ctio7_to_24xx *)pkt; 2095 ctio24->entry_type = CTIO_TYPE7; 2096 ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED; 2097 ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT); 2098 ctio24->vp_index = vha->vp_idx; 2099 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 2100 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 2101 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 2102 ctio24->exchange_addr = atio->u.isp24.exchange_addr; 2103 ctio24->u.status1.flags = (atio->u.isp24.attr << 9) | 2104 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | 2105 CTIO7_FLAGS_TERMINATE); 2106 ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); 2107 2108 /* Most likely, it isn't needed */ 2109 ctio24->u.status1.residual = get_unaligned((uint32_t *) 2110 &atio->u.isp24.fcp_cmnd.add_cdb[ 2111 atio->u.isp24.fcp_cmnd.add_cdb_len]); 2112 if (ctio24->u.status1.residual != 0) 2113 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER; 2114 2115 qla2x00_start_iocbs(vha, vha->req); 2116 return ret; 2117 } 2118 2119 static void qlt_send_term_exchange(struct scsi_qla_host *vha, 2120 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked) 2121 { 2122 unsigned long flags; 2123 int rc; 2124 2125 if (qlt_issue_marker(vha, ha_locked) < 0) 2126 return; 2127 2128 if (ha_locked) { 2129 rc = __qlt_send_term_exchange(vha, cmd, atio); 2130 goto done; 2131 } 2132 spin_lock_irqsave(&vha->hw->hardware_lock, flags); 2133 rc = __qlt_send_term_exchange(vha, cmd, atio); 2134 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); 2135 done: 2136 if (rc == 1) { 2137 if (!ha_locked && !in_interrupt()) 2138 msleep(250); /* just in case */ 2139 2140 vha->hw->tgt.tgt_ops->free_cmd(cmd); 2141 } 2142 } 2143 2144 void qlt_free_cmd(struct qla_tgt_cmd *cmd) 2145 { 2146 BUG_ON(cmd->sg_mapped); 2147 2148 if (unlikely(cmd->free_sg)) 2149 kfree(cmd->sg); 2150 kmem_cache_free(qla_tgt_cmd_cachep, cmd); 2151 } 2152 EXPORT_SYMBOL(qlt_free_cmd); 2153 2154 /* ha->hardware_lock supposed to be held on entry */ 2155 static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha, 2156 struct qla_tgt_cmd *cmd, void *ctio) 2157 { 2158 struct qla_tgt_srr_ctio *sc; 2159 struct qla_hw_data *ha = vha->hw; 2160 struct qla_tgt *tgt = ha->tgt.qla_tgt; 2161 struct qla_tgt_srr_imm *imm; 2162 2163 tgt->ctio_srr_id++; 2164 2165 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019, 2166 "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx); 2167 2168 if (!ctio) { 2169 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055, 2170 "qla_target(%d): SRR CTIO, but ctio is NULL\n", 2171 vha->vp_idx); 2172 return -EINVAL; 2173 } 2174 2175 sc = kzalloc(sizeof(*sc), GFP_ATOMIC); 2176 if (sc != NULL) { 2177 sc->cmd = cmd; 2178 /* IRQ is already OFF */ 2179 spin_lock(&tgt->srr_lock); 2180 sc->srr_id = tgt->ctio_srr_id; 2181 list_add_tail(&sc->srr_list_entry, 2182 &tgt->srr_ctio_list); 2183 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a, 2184 "CTIO SRR %p added (id %d)\n", sc, sc->srr_id); 2185 if (tgt->imm_srr_id == tgt->ctio_srr_id) { 2186 int found = 0; 2187 
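			/*
			 * The immediate-notify half of this SRR may already
			 * have arrived; look for the entry with the matching
			 * srr_id and only schedule the SRR worker once both
			 * halves are present.
			 */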
list_for_each_entry(imm, &tgt->srr_imm_list, 2188 srr_list_entry) { 2189 if (imm->srr_id == sc->srr_id) { 2190 found = 1; 2191 break; 2192 } 2193 } 2194 if (found) { 2195 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b, 2196 "Scheduling srr work\n"); 2197 schedule_work(&tgt->srr_work); 2198 } else { 2199 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056, 2200 "qla_target(%d): imm_srr_id " 2201 "== ctio_srr_id (%d), but there is no " 2202 "corresponding SRR IMM, deleting CTIO " 2203 "SRR %p\n", vha->vp_idx, 2204 tgt->ctio_srr_id, sc); 2205 list_del(&sc->srr_list_entry); 2206 spin_unlock(&tgt->srr_lock); 2207 2208 kfree(sc); 2209 return -EINVAL; 2210 } 2211 } 2212 spin_unlock(&tgt->srr_lock); 2213 } else { 2214 struct qla_tgt_srr_imm *ti; 2215 2216 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057, 2217 "qla_target(%d): Unable to allocate SRR CTIO entry\n", 2218 vha->vp_idx); 2219 spin_lock(&tgt->srr_lock); 2220 list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list, 2221 srr_list_entry) { 2222 if (imm->srr_id == tgt->ctio_srr_id) { 2223 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c, 2224 "IMM SRR %p deleted (id %d)\n", 2225 imm, imm->srr_id); 2226 list_del(&imm->srr_list_entry); 2227 qlt_reject_free_srr_imm(vha, imm, 1); 2228 } 2229 } 2230 spin_unlock(&tgt->srr_lock); 2231 2232 return -ENOMEM; 2233 } 2234 2235 return 0; 2236 } 2237 2238 /* 2239 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 2240 */ 2241 static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio, 2242 struct qla_tgt_cmd *cmd, uint32_t status) 2243 { 2244 int term = 0; 2245 2246 if (ctio != NULL) { 2247 struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio; 2248 term = !(c->flags & 2249 __constant_cpu_to_le16(OF_TERM_EXCH)); 2250 } else 2251 term = 1; 2252 2253 if (term) 2254 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); 2255 2256 return term; 2257 } 2258 2259 /* ha->hardware_lock supposed to be held on entry */ 2260 static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha, 2261 uint32_t handle) 2262 { 2263 struct qla_hw_data *ha = vha->hw; 2264 2265 handle--; 2266 if (ha->tgt.cmds[handle] != NULL) { 2267 struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle]; 2268 ha->tgt.cmds[handle] = NULL; 2269 return cmd; 2270 } else 2271 return NULL; 2272 } 2273 2274 /* ha->hardware_lock supposed to be held on entry */ 2275 static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha, 2276 uint32_t handle, void *ctio) 2277 { 2278 struct qla_tgt_cmd *cmd = NULL; 2279 2280 /* Clear out internal marks */ 2281 handle &= ~(CTIO_COMPLETION_HANDLE_MARK | 2282 CTIO_INTERMEDIATE_HANDLE_MARK); 2283 2284 if (handle != QLA_TGT_NULL_HANDLE) { 2285 if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) { 2286 ql_dbg(ql_dbg_tgt, vha, 0xe01d, "%s", 2287 "SKIP_HANDLE CTIO\n"); 2288 return NULL; 2289 } 2290 /* handle-1 is actually used */ 2291 if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) { 2292 ql_dbg(ql_dbg_tgt, vha, 0xe052, 2293 "qla_target(%d): Wrong handle %x received\n", 2294 vha->vp_idx, handle); 2295 return NULL; 2296 } 2297 cmd = qlt_get_cmd(vha, handle); 2298 if (unlikely(cmd == NULL)) { 2299 ql_dbg(ql_dbg_tgt, vha, 0xe053, 2300 "qla_target(%d): Suspicious: unable to " 2301 "find the command with handle %x\n", vha->vp_idx, 2302 handle); 2303 return NULL; 2304 } 2305 } else if (ctio != NULL) { 2306 /* We can't get loop ID from CTIO7 */ 2307 ql_dbg(ql_dbg_tgt, vha, 0xe054, 2308 "qla_target(%d): Wrong CTIO received: QLA24xx doesn't " 2309 "support NULL handles\n", vha->vp_idx); 2310 return NULL; 2311 } 2312 2313 return cmd; 2314 } 2315 
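/*
 * qlt_do_ctio_completion() matches a CTIO completion back to its command:
 * intermediate handles are only logged, hard errors may terminate the
 * exchange, and a command in the NEED_DATA state is handed to
 * ->handle_data() rather than being freed here.
 */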
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
	uint32_t status, void *ctio)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd;
	struct target_core_fabric_ops *tfo;
	struct qla_tgt_cmd *cmd;

	ql_dbg(ql_dbg_tgt, vha, 0xe01e,
	    "qla_target(%d): handle(ctio %p status %#x) <- %08x\n",
	    vha->vp_idx, ctio, status, handle);

	if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
		/* That could happen only in case of an error/reset/abort */
		if (status != CTIO_SUCCESS) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
			    "Intermediate CTIO received"
			    " (status %x)\n", status);
		}
		return;
	}

	cmd = qlt_ctio_to_cmd(vha, handle, ctio);
	if (cmd == NULL)
		return;

	se_cmd = &cmd->se_cmd;
	tfo = se_cmd->se_tfo;

	if (cmd->sg_mapped)
		qlt_unmap_sg(vha, cmd);

	if (unlikely(status != CTIO_SUCCESS)) {
		switch (status & 0xFFFF) {
		case CTIO_LIP_RESET:
		case CTIO_TARGET_RESET:
		case CTIO_ABORTED:
		case CTIO_TIMEOUT:
		case CTIO_INVALID_RX_ID:
			/* They are OK */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
			    "qla_target(%d): CTIO with "
			    "status %#x received, state %x, se_cmd %p, "
			    "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
			    "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
			    status, cmd->state, se_cmd);
			break;

		case CTIO_PORT_LOGGED_OUT:
		case CTIO_PORT_UNAVAILABLE:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
			    "qla_target(%d): CTIO with PORT LOGGED "
			    "OUT (29) or PORT UNAVAILABLE (28) status %x "
			    "received (state %x, se_cmd %p)\n", vha->vp_idx,
			    status, cmd->state, se_cmd);
			break;

		case CTIO_SRR_RECEIVED:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
			    "qla_target(%d): CTIO with SRR_RECEIVED"
			    " status %x received (state %x, se_cmd %p)\n",
			    vha->vp_idx, status, cmd->state, se_cmd);
			if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0)
				break;
			else
				return;

		default:
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
			    "qla_target(%d): CTIO with error status "
			    "0x%x received (state %x, se_cmd %p)\n",
			    vha->vp_idx, status, cmd->state, se_cmd);
			break;
		}

		if (cmd->state != QLA_TGT_STATE_NEED_DATA)
			if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
				return;
	}

	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
		ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd);
	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
		int rx_status = 0;

		cmd->state = QLA_TGT_STATE_DATA_IN;

		if (unlikely(status != CTIO_SUCCESS))
			rx_status = -EIO;
		else
			cmd->write_data_transferred = 1;

		ql_dbg(ql_dbg_tgt, vha, 0xe020,
		    "Data received, context %x, rx_status %d\n",
		    0x0, rx_status);

		ha->tgt.tgt_ops->handle_data(cmd);
		return;
	} else if (cmd->state == QLA_TGT_STATE_ABORTED) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
		    "Aborted command %p (tag %d) finished\n", cmd, cmd->tag);
	} else {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
		    "qla_target(%d): A command in state (%d) should "
		    "not return a CTIO complete\n", vha->vp_idx, cmd->state);
	}

	if (unlikely(status != CTIO_SUCCESS)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
		dump_stack();
	}

	ha->tgt.tgt_ops->free_cmd(cmd);
}
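
/*
 * Translate the FCP task-attribute code from the ATIO into the SAM task
 * message code expected by the generic target core.  Untagged commands
 * are treated as SIMPLE; unknown codes fall back to ORDERED.
 */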
static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
	uint8_t task_codes)
{
	int fcp_task_attr;

	switch (task_codes) {
	case ATIO_SIMPLE_QUEUE:
		fcp_task_attr = MSG_SIMPLE_TAG;
		break;
	case ATIO_HEAD_OF_QUEUE:
		fcp_task_attr = MSG_HEAD_TAG;
		break;
	case ATIO_ORDERED_QUEUE:
		fcp_task_attr = MSG_ORDERED_TAG;
		break;
	case ATIO_ACA_QUEUE:
		fcp_task_attr = MSG_ACA_TAG;
		break;
	case ATIO_UNTAGGED:
		fcp_task_attr = MSG_SIMPLE_TAG;
		break;
	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
		    "qla_target: unknown task code %x, use ORDERED instead\n",
		    task_codes);
		fcp_task_attr = MSG_ORDERED_TAG;
		break;
	}

	return fcp_task_attr;
}

static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
	uint8_t *);

/*
 * Process context for the I/O path into the tcm_qla2xxx code
 */
static void qlt_do_work(struct work_struct *work)
{
	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
	scsi_qla_host_t *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	struct qla_tgt_sess *sess = NULL;
	struct atio_from_isp *atio = &cmd->atio;
	unsigned char *cdb;
	unsigned long flags;
	uint32_t data_length;
	int ret, fcp_task_attr, data_dir, bidi = 0;

	if (tgt->tgt_stop)
		goto out_term;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    atio->u.isp24.fcp_hdr.s_id);
	/* Do kref_get() before dropping qla_hw_data->hardware_lock. */
	if (sess)
		kref_get(&sess->se_sess->sess_kref);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (unlikely(!sess)) {
		uint8_t *s_id = atio->u.isp24.fcp_hdr.s_id;

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
		    "qla_target(%d): Unable to find wwn login"
		    " (s_id %x:%x:%x), trying to create it manually\n",
		    vha->vp_idx, s_id[0], s_id[1], s_id[2]);

		if (atio->u.raw.entry_count > 1) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
			    "Dropping multi-entry cmd %p\n", cmd);
			goto out_term;
		}

		mutex_lock(&ha->tgt.tgt_mutex);
		sess = qlt_make_local_sess(vha, s_id);
		/* sess has an extra creation ref.
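		 * That reference is dropped via ha->tgt.tgt_ops->put_sess()
		 * further below, on both the normal and the out_term paths.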
		 */
		mutex_unlock(&ha->tgt.tgt_mutex);

		if (!sess)
			goto out_term;
	}

	cmd->sess = sess;
	cmd->loop_id = sess->loop_id;
	cmd->conf_compl_supported = sess->conf_compl_supported;

	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
	cmd->tag = atio->u.isp24.exchange_addr;
	cmd->unpacked_lun = scsilun_to_int(
	    (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);

	if (atio->u.isp24.fcp_cmnd.rddata &&
	    atio->u.isp24.fcp_cmnd.wrdata) {
		bidi = 1;
		data_dir = DMA_TO_DEVICE;
	} else if (atio->u.isp24.fcp_cmnd.rddata)
		data_dir = DMA_FROM_DEVICE;
	else if (atio->u.isp24.fcp_cmnd.wrdata)
		data_dir = DMA_TO_DEVICE;
	else
		data_dir = DMA_NONE;

	fcp_task_attr = qlt_get_fcp_task_attr(vha,
	    atio->u.isp24.fcp_cmnd.task_attr);
	data_length = be32_to_cpu(get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
	    atio->u.isp24.fcp_cmnd.add_cdb_len]));

	ql_dbg(ql_dbg_tgt, vha, 0xe022,
	    "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n",
	    cmd, cmd->unpacked_lun, cmd->tag);

	ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
	    fcp_task_attr, data_dir, bidi);
	if (ret != 0)
		goto out_term;
	/*
	 * Drop the extra session reference taken earlier in this function.
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return;

out_term:
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd);
	/*
	 * cmd has not been sent to the target core yet, so pass NULL as the
	 * second argument to qlt_send_term_exchange() and free the memory
	 * here.
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
	kmem_cache_free(qla_tgt_cmd_cachep, cmd);
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	struct qla_tgt_cmd *cmd;

	if (unlikely(tgt->tgt_stop)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
		    "New command while device %p is shutting down\n", tgt);
		return -EFAULT;
	}

	cmd = kmem_cache_zalloc(qla_tgt_cmd_cachep, GFP_ATOMIC);
	if (!cmd) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e,
		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&cmd->cmd_list);

	memcpy(&cmd->atio, atio, sizeof(*atio));
	cmd->state = QLA_TGT_STATE_NEW;
	cmd->tgt = ha->tgt.qla_tgt;
	cmd->vha = vha;

	INIT_WORK(&cmd->work, qlt_do_work);
	queue_work(qla_tgt_wq, &cmd->work);
	return 0;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
	int fn, void *iocb, int flags)
{
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	int res;
	uint8_t tmr_func;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (!mcmd) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
		    "qla_target(%d): Allocation of management "
		    "command failed, some commands and their "
		    "data could leak\n", vha->vp_idx);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));
	mcmd->sess = sess;

	if (iocb) {
		memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
		    sizeof(mcmd->orig_iocb.imm_ntfy));
	}
	mcmd->tmr_func = fn;
	mcmd->flags = flags;

	switch (fn) {
	case QLA_TGT_CLEAR_ACA:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000,
		    "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx);
		tmr_func = TMR_CLEAR_ACA;
		break;

	case QLA_TGT_TARGET_RESET:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001,
		    "qla_target(%d): TARGET_RESET received\n",
		    sess->vha->vp_idx);
		tmr_func = TMR_TARGET_WARM_RESET;
		break;

	case QLA_TGT_LUN_RESET:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
		    "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
		tmr_func = TMR_LUN_RESET;
		break;

	case QLA_TGT_CLEAR_TS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003,
		    "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx);
		tmr_func = TMR_CLEAR_TASK_SET;
		break;

	case QLA_TGT_ABORT_TS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004,
		    "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx);
		tmr_func = TMR_ABORT_TASK_SET;
		break;
#if 0
	case QLA_TGT_ABORT_ALL:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005,
		    "qla_target(%d): Doing ABORT_ALL_TASKS\n",
		    sess->vha->vp_idx);
		tmr_func = 0;
		break;

	case QLA_TGT_ABORT_ALL_SESS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006,
		    "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n",
		    sess->vha->vp_idx);
		tmr_func = 0;
		break;

	case QLA_TGT_NEXUS_LOSS_SESS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007,
		    "qla_target(%d): Doing NEXUS_LOSS_SESS\n",
		    sess->vha->vp_idx);
		tmr_func = 0;
		break;

	case QLA_TGT_NEXUS_LOSS:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008,
		    "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx);
		tmr_func = 0;
		break;
#endif
	default:
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a,
		    "qla_target(%d): Unknown task mgmt fn 0x%x\n",
		    sess->vha->vp_idx, fn);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -ENOSYS;
	}

	res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0);
	if (res != 0) {
		ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
		    "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
		    sess->vha->vp_idx, res);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt;
	struct qla_tgt_sess *sess;
	uint32_t lun, unpacked_lun;
	int lun_size, fn;

	tgt = ha->tgt.qla_tgt;

	lun = a->u.isp24.fcp_cmnd.lun;
	lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    a->u.isp24.fcp_hdr.s_id);
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
		    "qla_target(%d): task mgmt fn 0x%x for "
		    "non-existent session\n", vha->vp_idx, fn);
		return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
		    sizeof(struct atio_from_isp));
	}

	return qlt_issue_task_mgmt(sess, unpacked_lun, fn,
	    iocb, 0);
}

/* ha->hardware_lock supposed to be held on entry */
static int __qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess)
{
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	uint32_t lun, unpacked_lun;
	int rc;

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
	    sizeof(mcmd->orig_iocb.imm_ntfy));

	lun = a->u.isp24.fcp_cmnd.lun;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
	    le16_to_cpu(iocb->u.isp2x.seq_id));
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
		    vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_abort_task(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	int loop_id;

	loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);

	sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	if (sess == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
		    "qla_target(%d): task abort for non-existent "
		    "session\n", vha->vp_idx);
		return qlt_sched_sess_work(ha->tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
	}

	return __qlt_abort_task(vha, iocb, sess);
}

/*
 * ha->hardware_lock supposed to be held on entry.
Might drop it, then reaquire 2808 */ 2809 static int qlt_24xx_handle_els(struct scsi_qla_host *vha, 2810 struct imm_ntfy_from_isp *iocb) 2811 { 2812 struct qla_hw_data *ha = vha->hw; 2813 int res = 0; 2814 2815 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026, 2816 "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n", 2817 vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode); 2818 2819 switch (iocb->u.isp24.status_subcode) { 2820 case ELS_PLOGI: 2821 case ELS_FLOGI: 2822 case ELS_PRLI: 2823 case ELS_LOGO: 2824 case ELS_PRLO: 2825 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); 2826 break; 2827 case ELS_PDISC: 2828 case ELS_ADISC: 2829 { 2830 struct qla_tgt *tgt = ha->tgt.qla_tgt; 2831 if (tgt->link_reinit_iocb_pending) { 2832 qlt_send_notify_ack(vha, &tgt->link_reinit_iocb, 2833 0, 0, 0, 0, 0, 0); 2834 tgt->link_reinit_iocb_pending = 0; 2835 } 2836 res = 1; /* send notify ack */ 2837 break; 2838 } 2839 2840 default: 2841 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061, 2842 "qla_target(%d): Unsupported ELS command %x " 2843 "received\n", vha->vp_idx, iocb->u.isp24.status_subcode); 2844 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); 2845 break; 2846 } 2847 2848 return res; 2849 } 2850 2851 static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset) 2852 { 2853 struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL; 2854 size_t first_offset = 0, rem_offset = offset, tmp = 0; 2855 int i, sg_srr_cnt, bufflen = 0; 2856 2857 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023, 2858 "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, " 2859 "cmd->sg_cnt: %u, direction: %d\n", 2860 cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); 2861 2862 /* 2863 * FIXME: Reject non zero SRR relative offset until we can test 2864 * this code properly. 2865 */ 2866 pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset); 2867 return -1; 2868 2869 if (!cmd->sg || !cmd->sg_cnt) { 2870 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055, 2871 "Missing cmd->sg or zero cmd->sg_cnt in" 2872 " qla_tgt_set_data_offset\n"); 2873 return -EINVAL; 2874 } 2875 /* 2876 * Walk the current cmd->sg list until we locate the new sg_srr_start 2877 */ 2878 for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) { 2879 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024, 2880 "sg[%d]: %p page: %p, length: %d, offset: %d\n", 2881 i, sg, sg_page(sg), sg->length, sg->offset); 2882 2883 if ((sg->length + tmp) > offset) { 2884 first_offset = rem_offset; 2885 sg_srr_start = sg; 2886 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025, 2887 "Found matching sg[%d], using %p as sg_srr_start, " 2888 "and using first_offset: %zu\n", i, sg, 2889 first_offset); 2890 break; 2891 } 2892 tmp += sg->length; 2893 rem_offset -= sg->length; 2894 } 2895 2896 if (!sg_srr_start) { 2897 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056, 2898 "Unable to locate sg_srr_start for offset: %u\n", offset); 2899 return -EINVAL; 2900 } 2901 sg_srr_cnt = (cmd->sg_cnt - i); 2902 2903 sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL); 2904 if (!sg_srr) { 2905 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057, 2906 "Unable to allocate sgp\n"); 2907 return -ENOMEM; 2908 } 2909 sg_init_table(sg_srr, sg_srr_cnt); 2910 sgp = &sg_srr[0]; 2911 /* 2912 * Walk the remaining list for sg_srr_start, mapping to the newly 2913 * allocated sg_srr taking first_offset into account. 
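	 * The first entry of the new table absorbs first_offset; every
	 * subsequent entry is copied through unchanged.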
2914 */ 2915 for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) { 2916 if (first_offset) { 2917 sg_set_page(sgp, sg_page(sg), 2918 (sg->length - first_offset), first_offset); 2919 first_offset = 0; 2920 } else { 2921 sg_set_page(sgp, sg_page(sg), sg->length, 0); 2922 } 2923 bufflen += sgp->length; 2924 2925 sgp = sg_next(sgp); 2926 if (!sgp) 2927 break; 2928 } 2929 2930 cmd->sg = sg_srr; 2931 cmd->sg_cnt = sg_srr_cnt; 2932 cmd->bufflen = bufflen; 2933 cmd->offset += offset; 2934 cmd->free_sg = 1; 2935 2936 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg); 2937 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n", 2938 cmd->sg_cnt); 2939 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n", 2940 cmd->bufflen); 2941 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n", 2942 cmd->offset); 2943 2944 if (cmd->sg_cnt < 0) 2945 BUG(); 2946 2947 if (cmd->bufflen < 0) 2948 BUG(); 2949 2950 return 0; 2951 } 2952 2953 static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd, 2954 uint32_t srr_rel_offs, int *xmit_type) 2955 { 2956 int res = 0, rel_offs; 2957 2958 rel_offs = srr_rel_offs - cmd->offset; 2959 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d", 2960 srr_rel_offs, rel_offs); 2961 2962 *xmit_type = QLA_TGT_XMIT_ALL; 2963 2964 if (rel_offs < 0) { 2965 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062, 2966 "qla_target(%d): SRR rel_offs (%d) < 0", 2967 cmd->vha->vp_idx, rel_offs); 2968 res = -1; 2969 } else if (rel_offs == cmd->bufflen) 2970 *xmit_type = QLA_TGT_XMIT_STATUS; 2971 else if (rel_offs > 0) 2972 res = qlt_set_data_offset(cmd, rel_offs); 2973 2974 return res; 2975 } 2976 2977 /* No locks, thread context */ 2978 static void qlt_handle_srr(struct scsi_qla_host *vha, 2979 struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm) 2980 { 2981 struct imm_ntfy_from_isp *ntfy = 2982 (struct imm_ntfy_from_isp *)&imm->imm_ntfy; 2983 struct qla_hw_data *ha = vha->hw; 2984 struct qla_tgt_cmd *cmd = sctio->cmd; 2985 struct se_cmd *se_cmd = &cmd->se_cmd; 2986 unsigned long flags; 2987 int xmit_type = 0, resp = 0; 2988 uint32_t offset; 2989 uint16_t srr_ui; 2990 2991 offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs); 2992 srr_ui = ntfy->u.isp24.srr_ui; 2993 2994 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n", 2995 cmd, srr_ui); 2996 2997 switch (srr_ui) { 2998 case SRR_IU_STATUS: 2999 spin_lock_irqsave(&ha->hardware_lock, flags); 3000 qlt_send_notify_ack(vha, ntfy, 3001 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); 3002 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3003 xmit_type = QLA_TGT_XMIT_STATUS; 3004 resp = 1; 3005 break; 3006 case SRR_IU_DATA_IN: 3007 if (!cmd->sg || !cmd->sg_cnt) { 3008 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063, 3009 "Unable to process SRR_IU_DATA_IN due to" 3010 " missing cmd->sg, state: %d\n", cmd->state); 3011 dump_stack(); 3012 goto out_reject; 3013 } 3014 if (se_cmd->scsi_status != 0) { 3015 ql_dbg(ql_dbg_tgt, vha, 0xe02a, 3016 "Rejecting SRR_IU_DATA_IN with non GOOD " 3017 "scsi_status\n"); 3018 goto out_reject; 3019 } 3020 cmd->bufflen = se_cmd->data_length; 3021 3022 if (qlt_has_data(cmd)) { 3023 if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0) 3024 goto out_reject; 3025 spin_lock_irqsave(&ha->hardware_lock, flags); 3026 qlt_send_notify_ack(vha, ntfy, 3027 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); 3028 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3029 resp = 1; 3030 } else { 3031 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064, 3032 "qla_target(%d): SRR for in data for cmd " 3033 "without them 
(tag %d, SCSI status %d), " 3034 "reject", vha->vp_idx, cmd->tag, 3035 cmd->se_cmd.scsi_status); 3036 goto out_reject; 3037 } 3038 break; 3039 case SRR_IU_DATA_OUT: 3040 if (!cmd->sg || !cmd->sg_cnt) { 3041 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065, 3042 "Unable to process SRR_IU_DATA_OUT due to" 3043 " missing cmd->sg\n"); 3044 dump_stack(); 3045 goto out_reject; 3046 } 3047 if (se_cmd->scsi_status != 0) { 3048 ql_dbg(ql_dbg_tgt, vha, 0xe02b, 3049 "Rejecting SRR_IU_DATA_OUT" 3050 " with non GOOD scsi_status\n"); 3051 goto out_reject; 3052 } 3053 cmd->bufflen = se_cmd->data_length; 3054 3055 if (qlt_has_data(cmd)) { 3056 if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0) 3057 goto out_reject; 3058 spin_lock_irqsave(&ha->hardware_lock, flags); 3059 qlt_send_notify_ack(vha, ntfy, 3060 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); 3061 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3062 if (xmit_type & QLA_TGT_XMIT_DATA) 3063 qlt_rdy_to_xfer(cmd); 3064 } else { 3065 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066, 3066 "qla_target(%d): SRR for out data for cmd " 3067 "without them (tag %d, SCSI status %d), " 3068 "reject", vha->vp_idx, cmd->tag, 3069 cmd->se_cmd.scsi_status); 3070 goto out_reject; 3071 } 3072 break; 3073 default: 3074 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067, 3075 "qla_target(%d): Unknown srr_ui value %x", 3076 vha->vp_idx, srr_ui); 3077 goto out_reject; 3078 } 3079 3080 /* Transmit response in case of status and data-in cases */ 3081 if (resp) 3082 qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status); 3083 3084 return; 3085 3086 out_reject: 3087 spin_lock_irqsave(&ha->hardware_lock, flags); 3088 qlt_send_notify_ack(vha, ntfy, 0, 0, 0, 3089 NOTIFY_ACK_SRR_FLAGS_REJECT, 3090 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, 3091 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); 3092 if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 3093 cmd->state = QLA_TGT_STATE_DATA_IN; 3094 dump_stack(); 3095 } else 3096 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); 3097 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3098 } 3099 3100 static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha, 3101 struct qla_tgt_srr_imm *imm, int ha_locked) 3102 { 3103 struct qla_hw_data *ha = vha->hw; 3104 unsigned long flags = 0; 3105 3106 if (!ha_locked) 3107 spin_lock_irqsave(&ha->hardware_lock, flags); 3108 3109 qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0, 3110 NOTIFY_ACK_SRR_FLAGS_REJECT, 3111 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, 3112 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); 3113 3114 if (!ha_locked) 3115 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3116 3117 kfree(imm); 3118 } 3119 3120 static void qlt_handle_srr_work(struct work_struct *work) 3121 { 3122 struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work); 3123 struct scsi_qla_host *vha = tgt->vha; 3124 struct qla_tgt_srr_ctio *sctio; 3125 unsigned long flags; 3126 3127 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n", 3128 tgt); 3129 3130 restart: 3131 spin_lock_irqsave(&tgt->srr_lock, flags); 3132 list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) { 3133 struct qla_tgt_srr_imm *imm, *i, *ti; 3134 struct qla_tgt_cmd *cmd; 3135 struct se_cmd *se_cmd; 3136 3137 imm = NULL; 3138 list_for_each_entry_safe(i, ti, &tgt->srr_imm_list, 3139 srr_list_entry) { 3140 if (i->srr_id == sctio->srr_id) { 3141 list_del(&i->srr_list_entry); 3142 if (imm) { 3143 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068, 3144 "qla_target(%d): There must be " 3145 "only one IMM SRR per CTIO SRR " 3146 "(IMM SRR %p, id %d, CTIO %p\n", 
3147 vha->vp_idx, i, i->srr_id, sctio); 3148 qlt_reject_free_srr_imm(tgt->vha, i, 0); 3149 } else 3150 imm = i; 3151 } 3152 } 3153 3154 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a, 3155 "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio, 3156 sctio->srr_id); 3157 3158 if (imm == NULL) { 3159 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b, 3160 "Not found matching IMM for SRR CTIO (id %d)\n", 3161 sctio->srr_id); 3162 continue; 3163 } else 3164 list_del(&sctio->srr_list_entry); 3165 3166 spin_unlock_irqrestore(&tgt->srr_lock, flags); 3167 3168 cmd = sctio->cmd; 3169 /* 3170 * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow 3171 * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in() 3172 * logic.. 3173 */ 3174 cmd->offset = 0; 3175 if (cmd->free_sg) { 3176 kfree(cmd->sg); 3177 cmd->sg = NULL; 3178 cmd->free_sg = 0; 3179 } 3180 se_cmd = &cmd->se_cmd; 3181 3182 cmd->sg_cnt = se_cmd->t_data_nents; 3183 cmd->sg = se_cmd->t_data_sg; 3184 3185 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c, 3186 "SRR cmd %p (se_cmd %p, tag %d, op %x), " 3187 "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag, 3188 se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset); 3189 3190 qlt_handle_srr(vha, sctio, imm); 3191 3192 kfree(imm); 3193 kfree(sctio); 3194 goto restart; 3195 } 3196 spin_unlock_irqrestore(&tgt->srr_lock, flags); 3197 } 3198 3199 /* ha->hardware_lock supposed to be held on entry */ 3200 static void qlt_prepare_srr_imm(struct scsi_qla_host *vha, 3201 struct imm_ntfy_from_isp *iocb) 3202 { 3203 struct qla_tgt_srr_imm *imm; 3204 struct qla_hw_data *ha = vha->hw; 3205 struct qla_tgt *tgt = ha->tgt.qla_tgt; 3206 struct qla_tgt_srr_ctio *sctio; 3207 3208 tgt->imm_srr_id++; 3209 3210 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02d, "qla_target(%d): SRR received\n", 3211 vha->vp_idx); 3212 3213 imm = kzalloc(sizeof(*imm), GFP_ATOMIC); 3214 if (imm != NULL) { 3215 memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy)); 3216 3217 /* IRQ is already OFF */ 3218 spin_lock(&tgt->srr_lock); 3219 imm->srr_id = tgt->imm_srr_id; 3220 list_add_tail(&imm->srr_list_entry, 3221 &tgt->srr_imm_list); 3222 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e, 3223 "IMM NTFY SRR %p added (id %d, ui %x)\n", 3224 imm, imm->srr_id, iocb->u.isp24.srr_ui); 3225 if (tgt->imm_srr_id == tgt->ctio_srr_id) { 3226 int found = 0; 3227 list_for_each_entry(sctio, &tgt->srr_ctio_list, 3228 srr_list_entry) { 3229 if (sctio->srr_id == imm->srr_id) { 3230 found = 1; 3231 break; 3232 } 3233 } 3234 if (found) { 3235 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s", 3236 "Scheduling srr work\n"); 3237 schedule_work(&tgt->srr_work); 3238 } else { 3239 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030, 3240 "qla_target(%d): imm_srr_id " 3241 "== ctio_srr_id (%d), but there is no " 3242 "corresponding SRR CTIO, deleting IMM " 3243 "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id, 3244 imm); 3245 list_del(&imm->srr_list_entry); 3246 3247 kfree(imm); 3248 3249 spin_unlock(&tgt->srr_lock); 3250 goto out_reject; 3251 } 3252 } 3253 spin_unlock(&tgt->srr_lock); 3254 } else { 3255 struct qla_tgt_srr_ctio *ts; 3256 3257 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069, 3258 "qla_target(%d): Unable to allocate SRR IMM " 3259 "entry, SRR request will be rejected\n", vha->vp_idx); 3260 3261 /* IRQ is already OFF */ 3262 spin_lock(&tgt->srr_lock); 3263 list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list, 3264 srr_list_entry) { 3265 if (sctio->srr_id == tgt->imm_srr_id) { 3266 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031, 3267 "CTIO SRR %p deleted (id %d)\n", 3268 sctio, sctio->srr_id); 3269 list_del(&sctio->srr_list_entry); 3270 qlt_send_term_exchange(vha, 
sctio->cmd, 3271 &sctio->cmd->atio, 1); 3272 kfree(sctio); 3273 } 3274 } 3275 spin_unlock(&tgt->srr_lock); 3276 goto out_reject; 3277 } 3278 3279 return; 3280 3281 out_reject: 3282 qlt_send_notify_ack(vha, iocb, 0, 0, 0, 3283 NOTIFY_ACK_SRR_FLAGS_REJECT, 3284 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, 3285 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); 3286 } 3287 3288 /* 3289 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 3290 */ 3291 static void qlt_handle_imm_notify(struct scsi_qla_host *vha, 3292 struct imm_ntfy_from_isp *iocb) 3293 { 3294 struct qla_hw_data *ha = vha->hw; 3295 uint32_t add_flags = 0; 3296 int send_notify_ack = 1; 3297 uint16_t status; 3298 3299 status = le16_to_cpu(iocb->u.isp2x.status); 3300 switch (status) { 3301 case IMM_NTFY_LIP_RESET: 3302 { 3303 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032, 3304 "qla_target(%d): LIP reset (loop %#x), subcode %x\n", 3305 vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle), 3306 iocb->u.isp24.status_subcode); 3307 3308 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0) 3309 send_notify_ack = 0; 3310 break; 3311 } 3312 3313 case IMM_NTFY_LIP_LINK_REINIT: 3314 { 3315 struct qla_tgt *tgt = ha->tgt.qla_tgt; 3316 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033, 3317 "qla_target(%d): LINK REINIT (loop %#x, " 3318 "subcode %x)\n", vha->vp_idx, 3319 le16_to_cpu(iocb->u.isp24.nport_handle), 3320 iocb->u.isp24.status_subcode); 3321 if (tgt->link_reinit_iocb_pending) { 3322 qlt_send_notify_ack(vha, &tgt->link_reinit_iocb, 3323 0, 0, 0, 0, 0, 0); 3324 } 3325 memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb)); 3326 tgt->link_reinit_iocb_pending = 1; 3327 /* 3328 * QLogic requires to wait after LINK REINIT for possible 3329 * PDISC or ADISC ELS commands 3330 */ 3331 send_notify_ack = 0; 3332 break; 3333 } 3334 3335 case IMM_NTFY_PORT_LOGOUT: 3336 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034, 3337 "qla_target(%d): Port logout (loop " 3338 "%#x, subcode %x)\n", vha->vp_idx, 3339 le16_to_cpu(iocb->u.isp24.nport_handle), 3340 iocb->u.isp24.status_subcode); 3341 3342 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0) 3343 send_notify_ack = 0; 3344 /* The sessions will be cleared in the callback, if needed */ 3345 break; 3346 3347 case IMM_NTFY_GLBL_TPRLO: 3348 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035, 3349 "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status); 3350 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0) 3351 send_notify_ack = 0; 3352 /* The sessions will be cleared in the callback, if needed */ 3353 break; 3354 3355 case IMM_NTFY_PORT_CONFIG: 3356 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036, 3357 "qla_target(%d): Port config changed (%x)\n", vha->vp_idx, 3358 status); 3359 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0) 3360 send_notify_ack = 0; 3361 /* The sessions will be cleared in the callback, if needed */ 3362 break; 3363 3364 case IMM_NTFY_GLBL_LOGO: 3365 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a, 3366 "qla_target(%d): Link failure detected\n", 3367 vha->vp_idx); 3368 /* I_T nexus loss */ 3369 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0) 3370 send_notify_ack = 0; 3371 break; 3372 3373 case IMM_NTFY_IOCB_OVERFLOW: 3374 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b, 3375 "qla_target(%d): Cannot provide requested " 3376 "capability (IOCB overflowed the immediate notify " 3377 "resource count)\n", vha->vp_idx); 3378 break; 3379 3380 case IMM_NTFY_ABORT_TASK: 3381 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037, 3382 "qla_target(%d): Abort Task (S %08x I %#x -> " 3383 "L %#x)\n", vha->vp_idx, 3384 le16_to_cpu(iocb->u.isp2x.seq_id), 3385 GET_TARGET_ID(ha, 
		    (struct atio_from_isp *)iocb),
		    le16_to_cpu(iocb->u.isp2x.lun));
		if (qlt_abort_task(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_RESOURCE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
		    "qla_target(%d): Out of resources, host %ld\n",
		    vha->vp_idx, vha->host_no);
		break;

	case IMM_NTFY_MSG_RX:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
		    "qla_target(%d): Immediate notify task %x\n",
		    vha->vp_idx, iocb->u.isp2x.task_flags);
		if (qlt_handle_task_mgmt(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_ELS:
		if (qlt_24xx_handle_els(vha, iocb) == 0)
			send_notify_ack = 0;
		break;

	case IMM_NTFY_SRR:
		qlt_prepare_srr_imm(vha, iocb);
		send_notify_ack = 0;
		break;

	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
		    "qla_target(%d): Received unknown immediate "
		    "notify status %x\n", vha->vp_idx, status);
		break;
	}

	if (send_notify_ack)
		qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 * This function sends busy to ISP 2xxx or 24xx.
 */
static void qlt_send_busy(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status)
{
	struct ctio7_to_24xx *ctio24;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct qla_tgt_sess *sess = NULL;

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    atio->u.isp24.fcp_hdr.s_id);
	if (!sess) {
		qlt_send_term_exchange(vha, NULL, atio, 1);
		return;
	}
	/* Sending a marker isn't necessary, since we are called from the ISR */

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06e,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	pkt->entry_count = 1;
	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

	ctio24 = (struct ctio7_to_24xx *)pkt;
	ctio24->entry_type = CTIO_TYPE7;
	ctio24->nport_handle = sess->loop_id;
	ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio24->vp_index = vha->vp_idx;
	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
	ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
	    __constant_cpu_to_le16(
		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
		CTIO7_FLAGS_DONT_RET_CTIO);
	/*
	 * A CTIO from the fw w/o an se_cmd doesn't provide enough info to
	 * retry it, if explicit confirmation is used.
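	 * (That is presumably why CTIO7_FLAGS_DONT_RET_CTIO is set above.)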
3473 */ 3474 ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); 3475 ctio24->u.status1.scsi_status = cpu_to_le16(status); 3476 ctio24->u.status1.residual = get_unaligned((uint32_t *) 3477 &atio->u.isp24.fcp_cmnd.add_cdb[ 3478 atio->u.isp24.fcp_cmnd.add_cdb_len]); 3479 if (ctio24->u.status1.residual != 0) 3480 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER; 3481 3482 qla2x00_start_iocbs(vha, vha->req); 3483 } 3484 3485 /* ha->hardware_lock supposed to be held on entry */ 3486 /* called via callback from qla2xxx */ 3487 static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, 3488 struct atio_from_isp *atio) 3489 { 3490 struct qla_hw_data *ha = vha->hw; 3491 struct qla_tgt *tgt = ha->tgt.qla_tgt; 3492 int rc; 3493 3494 if (unlikely(tgt == NULL)) { 3495 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf039, 3496 "ATIO pkt, but no tgt (ha %p)", ha); 3497 return; 3498 } 3499 ql_dbg(ql_dbg_tgt, vha, 0xe02c, 3500 "qla_target(%d): ATIO pkt %p: type %02x count %02x", 3501 vha->vp_idx, atio, atio->u.raw.entry_type, 3502 atio->u.raw.entry_count); 3503 /* 3504 * In tgt_stop mode we also should allow all requests to pass. 3505 * Otherwise, some commands can stuck. 3506 */ 3507 3508 tgt->irq_cmd_count++; 3509 3510 switch (atio->u.raw.entry_type) { 3511 case ATIO_TYPE7: 3512 ql_dbg(ql_dbg_tgt, vha, 0xe02d, 3513 "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, " 3514 "add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n", 3515 vha->vp_idx, atio->u.isp24.fcp_cmnd.lun, 3516 atio->u.isp24.fcp_cmnd.rddata, 3517 atio->u.isp24.fcp_cmnd.wrdata, 3518 atio->u.isp24.fcp_cmnd.add_cdb_len, 3519 be32_to_cpu(get_unaligned((uint32_t *) 3520 &atio->u.isp24.fcp_cmnd.add_cdb[ 3521 atio->u.isp24.fcp_cmnd.add_cdb_len])), 3522 atio->u.isp24.fcp_hdr.s_id[0], 3523 atio->u.isp24.fcp_hdr.s_id[1], 3524 atio->u.isp24.fcp_hdr.s_id[2]); 3525 3526 if (unlikely(atio->u.isp24.exchange_addr == 3527 ATIO_EXCHANGE_ADDRESS_UNKNOWN)) { 3528 ql_dbg(ql_dbg_tgt, vha, 0xe058, 3529 "qla_target(%d): ATIO_TYPE7 " 3530 "received with UNKNOWN exchange address, " 3531 "sending QUEUE_FULL\n", vha->vp_idx); 3532 qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL); 3533 break; 3534 } 3535 if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) 3536 rc = qlt_handle_cmd_for_atio(vha, atio); 3537 else 3538 rc = qlt_handle_task_mgmt(vha, atio); 3539 if (unlikely(rc != 0)) { 3540 if (rc == -ESRCH) { 3541 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ 3542 qlt_send_busy(vha, atio, SAM_STAT_BUSY); 3543 #else 3544 qlt_send_term_exchange(vha, NULL, atio, 1); 3545 #endif 3546 } else { 3547 if (tgt->tgt_stop) { 3548 ql_dbg(ql_dbg_tgt, vha, 0xe059, 3549 "qla_target: Unable to send " 3550 "command to target for req, " 3551 "ignoring.\n"); 3552 } else { 3553 ql_dbg(ql_dbg_tgt, vha, 0xe05a, 3554 "qla_target(%d): Unable to send " 3555 "command to target, sending BUSY " 3556 "status.\n", vha->vp_idx); 3557 qlt_send_busy(vha, atio, SAM_STAT_BUSY); 3558 } 3559 } 3560 } 3561 break; 3562 3563 case IMMED_NOTIFY_TYPE: 3564 { 3565 if (unlikely(atio->u.isp2x.entry_status != 0)) { 3566 ql_dbg(ql_dbg_tgt, vha, 0xe05b, 3567 "qla_target(%d): Received ATIO packet %x " 3568 "with error status %x\n", vha->vp_idx, 3569 atio->u.raw.entry_type, 3570 atio->u.isp2x.entry_status); 3571 break; 3572 } 3573 ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO"); 3574 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio); 3575 break; 3576 } 3577 3578 default: 3579 ql_dbg(ql_dbg_tgt, vha, 0xe05c, 3580 "qla_target(%d): Received unknown ATIO atio " 3581 "type %x\n", vha->vp_idx, 
		    atio->u.raw.entry_type);
		break;
	}

	tgt->irq_cmd_count--;
}

/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe05d,
		    "qla_target(%d): Response pkt %x received, but no "
		    "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
		return;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe02f,
	    "qla_target(%d): response pkt %p: T %02x C %02x S %02x "
	    "handle %#x\n", vha->vp_idx, pkt, pkt->entry_type,
	    pkt->entry_count, pkt->entry_status, pkt->handle);

	/*
	 * In tgt_stop mode we should also allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	tgt->irq_cmd_count++;

	switch (pkt->entry_type) {
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		ql_dbg(ql_dbg_tgt, vha, 0xe030, "CTIO_TYPE7: instance %d\n",
		    vha->vp_idx);
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case ACCEPT_TGT_IO_TYPE:
	{
		struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
		int rc;
		ql_dbg(ql_dbg_tgt, vha, 0xe031,
		    "ACCEPT_TGT_IO instance %d status %04x "
		    "lun %04x read/write %d data_length %04x "
		    "target_id %02x rx_id %04x\n", vha->vp_idx,
		    le16_to_cpu(atio->u.isp2x.status),
		    le16_to_cpu(atio->u.isp2x.lun),
		    atio->u.isp2x.execution_codes,
		    le32_to_cpu(atio->u.isp2x.data_length), GET_TARGET_ID(ha,
		    atio), atio->u.isp2x.rx_id);
		if (atio->u.isp2x.status !=
		    __constant_cpu_to_le16(ATIO_CDB_VALID)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05e,
			    "qla_target(%d): ATIO with error "
			    "status %x received\n", vha->vp_idx,
			    le16_to_cpu(atio->u.isp2x.status));
			break;
		}
		ql_dbg(ql_dbg_tgt, vha, 0xe032,
		    "FCP CDB: 0x%02x, sizeof(cdb): %lu",
		    atio->u.isp2x.cdb[0],
		    (unsigned long int)sizeof(atio->u.isp2x.cdb));

		rc = qlt_handle_cmd_for_atio(vha, atio);
		if (unlikely(rc != 0)) {
			if (rc == -ESRCH) {
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
				qlt_send_busy(vha, atio, 0);
#else
				qlt_send_term_exchange(vha, NULL, atio, 1);
#endif
			} else {
				if (tgt->tgt_stop) {
					ql_dbg(ql_dbg_tgt, vha, 0xe05f,
					    "qla_target: Unable to send "
					    "command to target, sending TERM "
					    "EXCHANGE for rsp\n");
					qlt_send_term_exchange(vha, NULL,
					    atio, 1);
				} else {
					ql_dbg(ql_dbg_tgt, vha, 0xe060,
					    "qla_target(%d): Unable to send "
					    "command to target, sending BUSY "
					    "status\n", vha->vp_idx);
					qlt_send_busy(vha, atio, 0);
				}
			}
		}
	}
	break;

	case CONTINUE_TGT_IO_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		ql_dbg(ql_dbg_tgt, vha, 0xe033,
		    "CONTINUE_TGT_IO: instance %d\n", vha->vp_idx);
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case CTIO_A64_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		ql_dbg(ql_dbg_tgt, vha, 0xe034, "CTIO_A64: instance %d\n",
		    vha->vp_idx);
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case IMMED_NOTIFY_TYPE:
		ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
		break;

	case NOTIFY_ACK_TYPE:
		if (tgt->notify_ack_expected > 0) {
			struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe036,
			    "NOTIFY_ACK seq %08x status %x\n",
			    le16_to_cpu(entry->u.isp2x.seq_id),
			    le16_to_cpu(entry->u.isp2x.status));
			tgt->notify_ack_expected--;
			if (entry->u.isp2x.status !=
			    __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe061,
				    "qla_target(%d): NOTIFY_ACK "
				    "failed %x\n", vha->vp_idx,
				    le16_to_cpu(entry->u.isp2x.status));
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe062,
			    "qla_target(%d): Unexpected NOTIFY_ACK received\n",
			    vha->vp_idx);
		}
		break;

	case ABTS_RECV_24XX:
		ql_dbg(ql_dbg_tgt, vha, 0xe037,
		    "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
		qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
		break;

	case ABTS_RESP_24XX:
		if (tgt->abts_resp_expected > 0) {
			struct abts_resp_from_24xx_fw *entry =
			    (struct abts_resp_from_24xx_fw *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe038,
			    "ABTS_RESP_24XX: compl_status %x\n",
			    entry->compl_status);
			tgt->abts_resp_expected--;
			if (le16_to_cpu(entry->compl_status) !=
			    ABTS_RESP_COMPL_SUCCESS) {
				if ((entry->error_subcode1 == 0x1E) &&
				    (entry->error_subcode2 == 0)) {
					/*
					 * We've got a race here: the aborted
					 * exchange was not terminated, i.e.
					 * the response for the aborted command
					 * was sent between the time the abort
					 * request was received and the time it
					 * was processed.
					 * Unfortunately, the firmware has a
					 * silly requirement that all aborted
					 * exchanges must be explicitly
					 * terminated, otherwise it refuses to
					 * send responses for the abort
					 * requests. So, we have to
					 * (re)terminate the exchange and retry
					 * the abort response.
					 */
					qlt_24xx_retry_term_exchange(vha,
					    entry);
				} else
					ql_dbg(ql_dbg_tgt, vha, 0xe063,
					    "qla_target(%d): ABTS_RESP_24XX "
					    "failed %x (subcode %x:%x)",
					    vha->vp_idx, entry->compl_status,
					    entry->error_subcode1,
					    entry->error_subcode2);
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe064,
			    "qla_target(%d): Unexpected ABTS_RESP_24XX "
			    "received\n", vha->vp_idx);
		}
		break;

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe065,
		    "qla_target(%d): Received unknown response pkt "
		    "type %x\n", vha->vp_idx, pkt->entry_type);
		break;
	}

	tgt->irq_cmd_count--;
}

/*
 * ha->hardware_lock supposed to be held on entry.
/*
 * ha->hardware_lock supposed to be held on entry.
 * Might drop it, then reacquire.
 */
void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
	uint16_t *mailbox)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	int login_code;

	ql_dbg(ql_dbg_tgt, vha, 0xe039,
	    "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
	    vha->host_no, atomic_read(&vha->loop_state), vha->flags.init_done,
	    ha->operating_mode, ha->current_topology);

	if (!ha->tgt.tgt_ops)
		return;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe03a,
		    "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
		return;
	}

	if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
	    IS_QLA2100(ha))
		return;
	/*
	 * In tgt_stop mode we should also allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	tgt->irq_cmd_count++;

	switch (code) {
	case MBA_RESET:			/* Reset */
	case MBA_SYSTEM_ERR:		/* System Error */
	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
		    "qla_target(%d): System error async event %#x "
		    "occurred", vha->vp_idx, code);
		break;
	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up. */
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LOOP_UP:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
		    "qla_target(%d): Async LOOP_UP occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}
		break;
	}

	case MBA_LIP_OCCURRED:
	case MBA_LOOP_DOWN:
	case MBA_LIP_RESET:
	case MBA_RSCN_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
		    "qla_target(%d): Async event %#x occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		break;

	case MBA_PORT_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
		    "qla_target(%d): Port update async event %#x "
		    "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
		    "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));

		login_code = le16_to_cpu(mailbox[2]);
		if (login_code == 0x4)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
			    "Async MB 2: Got PLOGI Complete\n");
		else if (login_code == 0x7)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
			    "Async MB 2: Port Logged Out\n");
		break;

	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
		    "qla_target(%d): Async event %#x occurred: "
		    "ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		break;
	}

	tgt->irq_cmd_count--;
}
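/*
 * Helper for qlt_make_local_sess() below: allocates a temporary
 * fc_port_t and fills it via a GET PORT DATABASE mailbox command.
 * The returned structure is not registered anywhere; the caller owns
 * it and is expected to kfree() it once the session has been created.
 */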
"qla_target(%d): Allocation of tmp FC port failed", 3901 vha->vp_idx); 3902 return NULL; 3903 } 3904 3905 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf041, "loop_id %d", loop_id); 3906 3907 fcport->loop_id = loop_id; 3908 3909 rc = qla2x00_get_port_database(vha, fcport, 0); 3910 if (rc != QLA_SUCCESS) { 3911 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070, 3912 "qla_target(%d): Failed to retrieve fcport " 3913 "information -- get_port_database() returned %x " 3914 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id); 3915 kfree(fcport); 3916 return NULL; 3917 } 3918 3919 return fcport; 3920 } 3921 3922 /* Must be called under tgt_mutex */ 3923 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha, 3924 uint8_t *s_id) 3925 { 3926 struct qla_hw_data *ha = vha->hw; 3927 struct qla_tgt_sess *sess = NULL; 3928 fc_port_t *fcport = NULL; 3929 int rc, global_resets; 3930 uint16_t loop_id = 0; 3931 3932 retry: 3933 global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count); 3934 3935 rc = qla24xx_get_loop_id(vha, s_id, &loop_id); 3936 if (rc != 0) { 3937 if ((s_id[0] == 0xFF) && 3938 (s_id[1] == 0xFC)) { 3939 /* 3940 * This is Domain Controller, so it should be 3941 * OK to drop SCSI commands from it. 3942 */ 3943 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042, 3944 "Unable to find initiator with S_ID %x:%x:%x", 3945 s_id[0], s_id[1], s_id[2]); 3946 } else 3947 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071, 3948 "qla_target(%d): Unable to find " 3949 "initiator with S_ID %x:%x:%x", 3950 vha->vp_idx, s_id[0], s_id[1], 3951 s_id[2]); 3952 return NULL; 3953 } 3954 3955 fcport = qlt_get_port_database(vha, loop_id); 3956 if (!fcport) 3957 return NULL; 3958 3959 if (global_resets != 3960 atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) { 3961 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043, 3962 "qla_target(%d): global reset during session discovery " 3963 "(counter was %d, new %d), retrying", vha->vp_idx, 3964 global_resets, 3965 atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)); 3966 goto retry; 3967 } 3968 3969 sess = qlt_create_sess(vha, fcport, true); 3970 3971 kfree(fcport); 3972 return sess; 3973 } 3974 3975 static void qlt_abort_work(struct qla_tgt *tgt, 3976 struct qla_tgt_sess_work_param *prm) 3977 { 3978 struct scsi_qla_host *vha = tgt->vha; 3979 struct qla_hw_data *ha = vha->hw; 3980 struct qla_tgt_sess *sess = NULL; 3981 unsigned long flags; 3982 uint32_t be_s_id; 3983 uint8_t s_id[3]; 3984 int rc; 3985 3986 spin_lock_irqsave(&ha->hardware_lock, flags); 3987 3988 if (tgt->tgt_stop) 3989 goto out_term; 3990 3991 s_id[0] = prm->abts.fcp_hdr_le.s_id[2]; 3992 s_id[1] = prm->abts.fcp_hdr_le.s_id[1]; 3993 s_id[2] = prm->abts.fcp_hdr_le.s_id[0]; 3994 3995 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, 3996 (unsigned char *)&be_s_id); 3997 if (!sess) { 3998 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3999 4000 mutex_lock(&ha->tgt.tgt_mutex); 4001 sess = qlt_make_local_sess(vha, s_id); 4002 /* sess has got an extra creation ref */ 4003 mutex_unlock(&ha->tgt.tgt_mutex); 4004 4005 spin_lock_irqsave(&ha->hardware_lock, flags); 4006 if (!sess) 4007 goto out_term; 4008 } else { 4009 kref_get(&sess->se_sess->sess_kref); 4010 } 4011 4012 if (tgt->tgt_stop) 4013 goto out_term; 4014 4015 rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess); 4016 if (rc != 0) 4017 goto out_term; 4018 4019 ha->tgt.tgt_ops->put_sess(sess); 4020 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4021 return; 4022 4023 out_term: 4024 qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false); 4025 if (sess) 4026 
static void qlt_abort_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	unsigned long flags;
	uint8_t s_id[3];
	int rc;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (tgt->tgt_stop)
		goto out_term;

	/* fcp_hdr_le has the S_ID byte-reversed; undo that here */
	s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
	s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
	s_id[2] = prm->abts.fcp_hdr_le.s_id[0];

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		mutex_lock(&ha->tgt.tgt_mutex);
		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */
		mutex_unlock(&ha->tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (!sess)
			goto out_term;
	} else {
		kref_get(&sess->se_sess->sess_kref);
	}

	if (tgt->tgt_stop)
		goto out_term;

	rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
	if (rc != 0)
		goto out_term;

	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return;

out_term:
	qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void qlt_tmr_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct atio_from_isp *a = &prm->tm_iocb2;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	unsigned long flags;
	uint8_t *s_id = NULL; /* to hide compiler warnings */
	int rc;
	uint32_t lun, unpacked_lun;
	int lun_size, fn;
	void *iocb;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (tgt->tgt_stop)
		goto out_term;

	s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		mutex_lock(&ha->tgt.tgt_mutex);
		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */
		mutex_unlock(&ha->tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (!sess)
			goto out_term;
	} else {
		kref_get(&sess->se_sess->sess_kref);
	}

	iocb = a;
	lun = a->u.isp24.fcp_cmnd.lun;
	lun_size = sizeof(lun);
	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
	if (rc != 0)
		goto out_term;

	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return;

out_term:
	qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static void qlt_sess_work_fn(struct work_struct *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
	struct scsi_qla_host *vha = tgt->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		struct qla_tgt_sess_work_param *prm = list_entry(
		    tgt->sess_works_list.next, typeof(*prm),
		    sess_works_list_entry);

		/*
		 * This work can be scheduled on several CPUs at the same
		 * time, so we must delete the entry to eliminate double
		 * processing.
		 */
		list_del(&prm->sess_works_list_entry);

		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

		switch (prm->type) {
		case QLA_TGT_SESS_WORK_ABORT:
			qlt_abort_work(tgt, prm);
			break;
		case QLA_TGT_SESS_WORK_TM:
			qlt_tmr_work(tgt, prm);
			break;
		default:
			BUG_ON(1);
			break;
		}

		spin_lock_irqsave(&tgt->sess_work_lock, flags);

		kfree(prm);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
}
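/*
 * The two work handlers above exist because ABTS and task-management
 * IOCBs can arrive from initiators that do not have a session yet.
 * The interrupt path cannot create one (it would have to sleep), so
 * such IOCBs are queued on tgt->sess_works_list and replayed here in
 * process context, where taking tgt_mutex and waiting for mailbox
 * commands is allowed.
 */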
/* Must be called under tgt_host_action_mutex */
int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
{
	struct qla_tgt *tgt;

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	if (!IS_TGT_MODE_CAPABLE(ha)) {
		ql_log(ql_log_warn, base_vha, 0xe070,
		    "This adapter does not support target mode.\n");
		return 0;
	}

	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
	    "Registering target for host %ld(%p)", base_vha->host_no, ha);

	BUG_ON((ha->tgt.qla_tgt != NULL) || (ha->tgt.tgt_ops != NULL));

	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
	if (!tgt) {
		ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
		    "Unable to allocate struct qla_tgt\n");
		return -ENOMEM;
	}

	if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
		base_vha->host->hostt->supported_mode |= MODE_TARGET;

	tgt->ha = ha;
	tgt->vha = base_vha;
	init_waitqueue_head(&tgt->waitQ);
	INIT_LIST_HEAD(&tgt->sess_list);
	INIT_LIST_HEAD(&tgt->del_sess_list);
	INIT_DELAYED_WORK(&tgt->sess_del_work,
		(void (*)(struct work_struct *))qlt_del_sess_work_fn);
	spin_lock_init(&tgt->sess_work_lock);
	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
	INIT_LIST_HEAD(&tgt->sess_works_list);
	spin_lock_init(&tgt->srr_lock);
	INIT_LIST_HEAD(&tgt->srr_ctio_list);
	INIT_LIST_HEAD(&tgt->srr_imm_list);
	INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
	atomic_set(&tgt->tgt_global_resets_count, 0);

	ha->tgt.qla_tgt = tgt;

	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
	    "qla_target(%d): using 64 Bit PCI addressing",
	    base_vha->vp_idx);
	tgt->tgt_enable_64bit_addr = 1;
	/* 3 is reserved */
	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
	tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
	tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;

	mutex_lock(&qla_tgt_mutex);
	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
	mutex_unlock(&qla_tgt_mutex);

	return 0;
}

/* Must be called under tgt_host_action_mutex */
int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
{
	if (!ha->tgt.qla_tgt)
		return 0;

	mutex_lock(&qla_tgt_mutex);
	list_del(&ha->tgt.qla_tgt->tgt_list_entry);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
	    vha->host_no, ha);
	qlt_release(ha->tgt.qla_tgt);

	return 0;
}

static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
	unsigned char *b)
{
	int i;

	pr_debug("qla2xxx HW vha->node_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->node_name[i]);
	pr_debug("\n");
	pr_debug("qla2xxx HW vha->port_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->port_name[i]);
	pr_debug("\n");

	pr_debug("qla2xxx passed configfs WWPN: ");
	put_unaligned_be64(wwpn, b);
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", b[i]);
	pr_debug("\n");
}
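/*
 * Minimal sketch of how a fabric module is expected to claim a port
 * with qlt_lport_register() below (the callback and lport names here
 * are illustrative, not part of this API):
 *
 *	static int my_lport_callback(struct scsi_qla_host *vha)
 *	{
 *		// remember vha inside the module's lport; 0 on success
 *		return 0;
 *	}
 *
 *	rc = qlt_lport_register(&my_qla_tgt_ops, wwpn,
 *	    my_lport_callback, my_lport);
 *	// rc == -ENODEV: no unclaimed qla2xxx host matched the WWPN
 *
 * On success the module later undoes this with qlt_lport_deregister().
 */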
/**
 * qlt_lport_register - register lport with external module
 *
 * @qla_tgt_ops: Pointer for tcm_qla2xxx qla_tgt_ops
 * @wwpn: Passed FC target WWPN
 * @callback: lport initialization callback for tcm_qla2xxx code
 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
 */
int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
	int (*callback)(struct scsi_qla_host *), void *target_lport_ptr)
{
	struct qla_tgt *tgt;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct Scsi_Host *host;
	unsigned long flags;
	int rc;
	u8 b[WWN_SIZE];

	mutex_lock(&qla_tgt_mutex);
	list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
		vha = tgt->vha;
		ha = vha->hw;

		host = vha->host;
		if (!host)
			continue;

		if (ha->tgt.tgt_ops != NULL)
			continue;

		if (!(host->hostt->supported_mode & MODE_TARGET))
			continue;

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (host->active_mode & MODE_TARGET) {
			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		if (!scsi_host_get(host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe068,
			    "Unable to scsi_host_get() for"
			    " qla2xxx scsi_host\n");
			continue;
		}
		qlt_lport_dump(vha, wwpn, b);

		if (memcmp(vha->port_name, b, WWN_SIZE)) {
			scsi_host_put(host);
			continue;
		}
		/*
		 * Setup passed parameters ahead of invoking callback
		 */
		ha->tgt.tgt_ops = qla_tgt_ops;
		ha->tgt.target_lport_ptr = target_lport_ptr;
		rc = (*callback)(vha);
		if (rc != 0) {
			ha->tgt.tgt_ops = NULL;
			ha->tgt.target_lport_ptr = NULL;
		}
		mutex_unlock(&qla_tgt_mutex);
		return rc;
	}
	mutex_unlock(&qla_tgt_mutex);

	return -ENODEV;
}
EXPORT_SYMBOL(qlt_lport_register);

/**
 * qlt_lport_deregister - Deregister lport
 *
 * @vha: Registered scsi_qla_host pointer
 */
void qlt_lport_deregister(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct Scsi_Host *sh = vha->host;
	/*
	 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
	 */
	ha->tgt.target_lport_ptr = NULL;
	ha->tgt.tgt_ops = NULL;
	/*
	 * Release the Scsi_Host reference for the underlying qla2xxx host
	 */
	scsi_host_put(sh);
}
EXPORT_SYMBOL(qlt_lport_deregister);

/* Must be called under HW lock */
void qlt_set_mode(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_TARGET;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode |= MODE_TARGET;
		break;
	default:
		break;
	}

	if (ha->tgt.ini_mode_force_reverse)
		qla_reverse_ini_mode(vha);
}

/* Must be called under HW lock */
void qlt_clear_mode(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
		vha->host->active_mode = MODE_UNKNOWN;
		break;
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode &= ~MODE_TARGET;
		break;
	default:
		break;
	}

	if (ha->tgt.ini_mode_force_reverse)
		qla_reverse_ini_mode(vha);
}

/*
 * qlt_enable_vha - NO LOCK HELD
 *
 * host_reset, bring up w/ Target Mode Enabled
 */
void
qlt_enable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe069,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stopped = 0;
	qlt_set_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_hba_online(vha);
}
EXPORT_SYMBOL(qlt_enable_vha);
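/*
 * qlt_set_mode()/qlt_clear_mode() above only flip the host's
 * active_mode bookkeeping; the new personality takes effect when the
 * chip is reinitialized.  That is why qlt_enable_vha() and
 * qlt_disable_vha() schedule a full ISP abort via ISP_ABORT_NEEDED and
 * then block in qla2x00_wait_for_hba_online() until the adapter has
 * come back up in the requested mode.
 */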
/*
 * qlt_disable_vha - NO LOCK HELD
 *
 * Disable Target Mode and reset the adapter
 */
void
qlt_disable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe06a,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_clear_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_hba_online(vha);
}

/*
 * Called from qla_init.c:qla24xx_vport_create() context to setup
 * the target mode specific struct scsi_qla_host and struct qla_hw_data
 * members.
 */
void
qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
{
	if (!qla_tgt_mode_enabled(vha))
		return;

	mutex_init(&ha->tgt.tgt_mutex);
	mutex_init(&ha->tgt.tgt_host_action_mutex);

	qlt_clear_mode(vha);

	/*
	 * NOTE: Currently the value is kept the same for <24xx and
	 * >=24xx ISPs. If it is necessary to change it,
	 * the check should be added for specific ISPs,
	 * assigning the value appropriately.
	 */
	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
}

void
qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
{
	/*
	 * FC-4 Feature bit 0 indicates target functionality to the name server.
	 */
	if (qla_tgt_mode_enabled(vha)) {
		if (qla_ini_mode_enabled(vha))
			ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
		else
			ct_req->req.rff_id.fc4_feature = BIT_0;
	} else if (qla_ini_mode_enabled(vha)) {
		ct_req->req.rff_id.fc4_feature = BIT_1;
	}
}

/*
 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
 * @vha: SCSI driver HA context
 *
 * Beginning of ATIO ring has initialization control block already built
 * by nvram config routine.
 */
void
qlt_init_atio_q_entries(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t cnt;
	struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;

	if (!qla_tgt_mode_enabled(vha))
		return;

	for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
		pkt->u.raw.signature = ATIO_PROCESSED;
		pkt++;
	}
}
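/*
 * The ATIO_PROCESSED signature written above is what terminates the
 * ring walk in qlt_24xx_process_atio_queue() below: every entry is
 * pre-marked as processed, the firmware overwrites the signature when
 * it posts a real ATIO, and the handler re-stamps each entry as it
 * consumes it, so the loop can stop as soon as it sees ATIO_PROCESSED
 * again.
 */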
/*
 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
 * @vha: SCSI driver HA context
 */
void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *pkt;
	int cnt, i;

	if (!vha->flags.online)
		return;

	while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		cnt = pkt->u.raw.entry_count;

		qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt);

		for (i = 0; i < cnt; i++) {
			ha->tgt.atio_ring_index++;
			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
				ha->tgt.atio_ring_index = 0;
				ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
			} else
				ha->tgt.atio_ring_ptr++;

			pkt->u.raw.signature = ATIO_PROCESSED;
			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		}
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
}

void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
	RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));

	if (IS_ATIO_MSIX_CAPABLE(ha)) {
		struct qla_msix_entry *msix = &ha->msix_entries[2];
		struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;

		icb->msix_atio = cpu_to_le16(msix->entry);
		ql_dbg(ql_dbg_init, vha, 0xf072,
		    "Registering ICB vector 0x%x for ATIO queue.\n",
		    msix->entry);
	}
}
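/*
 * NVRAM configuration is done in two stages for both the 24xx and 81xx
 * variants below.  Stage 1 patches the firmware options before the
 * init control block is built (per the comments in the code, BIT_4 of
 * firmware_options_1 enables target mode and BIT_5 disables initiator
 * mode); the pristine values are captured once, guarded by
 * ha->tgt.saved_set, so they can be restored verbatim when target mode
 * is switched off again.  Stage 2 then overrides the ICB node name when
 * ha->tgt.node_name_set indicates an explicit target node name was
 * configured.
 */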
void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	if (qla_tgt_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		nv->exchange_count = __constant_cpu_to_le16(0xFFFF);

		/* Enable target mode */
		nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (!qla_ini_mode_enabled(vha))
			nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
		/* Enable FC tapes support */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
		/* Disable Full Login after LIP */
		nv->host_p &= __constant_cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* out-of-order frames reassembly */
	nv->firmware_options_3 |= BIT_6|BIT_9;

	if (ha->tgt.enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
				FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
	}
}

void
qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_24xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
	}
}

void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		nv->exchange_count = __constant_cpu_to_le16(0xFFFF);

		/* Enable target mode */
		nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (!qla_ini_mode_enabled(vha))
			nv->firmware_options_1 |=
			    __constant_cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
		/* Enable FC tapes support */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
		/* Disable Full Login after LIP */
		nv->host_p &= __constant_cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* out-of-order frames reassembly */
	nv->firmware_options_3 |= BIT_6|BIT_9;

	if (ha->tgt.enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
				FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
	}
}

void
qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_81xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
	}
}

void
qlt_83xx_iospace_config(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	ha->msix_count += 1; /* For ATIO Q */
}
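/*
 * Presumably called from the initiator-side response-queue error path:
 * returning 1 tells the caller that an errored status entry actually
 * belongs to one of the target-mode IOCB types handled by
 * qlt_response_pkt(), so it must not be completed as an initiator
 * command.
 */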
int
qlt_24xx_process_response_error(struct scsi_qla_host *vha,
	struct sts_entry_24xx *pkt)
{
	switch (pkt->entry_type) {
	case ABTS_RECV_24XX:
	case ABTS_RESP_24XX:
	case CTIO_TYPE7:
	case NOTIFY_ACK_TYPE:
		return 1;
	default:
		return 0;
	}
}

void
qlt_modify_vp_config(struct scsi_qla_host *vha,
	struct vp_config_entry_24xx *vpmod)
{
	if (qla_tgt_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_5;
	/* Disable ini mode, if requested */
	if (!qla_ini_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_4;
}

void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->mqenable || IS_QLA83XX(ha)) {
		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
	} else {
		ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
	}

	mutex_init(&ha->tgt.tgt_mutex);
	mutex_init(&ha->tgt.tgt_host_action_mutex);
	qlt_clear_mode(base_vha);
}

irqreturn_t
qla83xx_msix_atio_q(int irq, void *dev_id)
{
	struct rsp_que *rsp;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *)dev_id;
	ha = rsp->hw;
	vha = pci_get_drvdata(ha->pdev);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	qlt_24xx_process_atio_queue(vha);
	qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

int
qlt_mem_alloc(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
	    MAX_MULTI_ID_FABRIC, GFP_KERNEL);
	if (!ha->tgt.tgt_vp_map)
		return -ENOMEM;

	ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
	    (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
	    &ha->tgt.atio_dma, GFP_KERNEL);
	if (!ha->tgt.atio_ring) {
		kfree(ha->tgt.tgt_vp_map);
		return -ENOMEM;
	}
	return 0;
}

void
qlt_mem_free(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.atio_ring) {
		dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
		    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
		    ha->tgt.atio_dma);
	}
	kfree(ha->tgt.tgt_vp_map);
}

/* vport_slock to be held by the caller */
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	switch (cmd) {
	case SET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
		break;
	case SET_AL_PA:
		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
		break;
	case RESET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
		break;
	case RESET_AL_PA:
		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
		break;
	}
}

static int __init qlt_parse_ini_mode(void)
{
	if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
	else
		return false;

	return true;
}
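/*
 * qlt_parse_ini_mode() above maps the qlini_mode module parameter
 * string to ql2x_ini_mode; an unrecognized string makes qlt_init()
 * below fail with -EINVAL.  For example, loading the driver with
 * initiator mode switched off entirely would look like:
 *
 *	modprobe qla2xxx qlini_mode="disabled"
 */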
int __init
qlt_init(void)
{
	int ret;

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep",
	    sizeof(struct qla_tgt_cmd), __alignof__(struct qla_tgt_cmd), 0,
	    NULL);
	if (!qla_tgt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06c,
		    "kmem_cache_create for qla_tgt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
	    qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		ret = -ENOMEM;
		goto out;
	}

	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
out:
	kmem_cache_destroy(qla_tgt_cmd_cachep);
	return ret;
}

void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	kmem_cache_destroy(qla_tgt_cmd_cachep);
}
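/*
 * A sketch of the init/exit contract implied above, assuming the core
 * qla2xxx module (qla_os.c) drives these entry points: qlt_init() is
 * called once at module load, before SCSI host registration, and
 * qlt_exit() once at unload.  Caller side, roughly:
 *
 *	ret = qlt_init();
 *	if (ret < 0)
 *		return ret;	// allocation or parameter failure
 *	// ret == 1 means qlini_mode=disabled: the caller should skip
 *	// enabling initiator mode when bringing up the adapter
 */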