/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "qla_def.h"
#include "qla_target.h"

static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
	"Determines when initiator mode will be enabled. Possible values: "
	"\"exclusive\" - initiator mode will be enabled on load, "
	"disabled when target mode is enabled, and enabled back when "
	"target mode is disabled; "
	"\"disabled\" - initiator mode will never be enabled; "
	"\"enabled\" (default) - initiator mode will always stay enabled.");

int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};

/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE		0	/* simple task attribute */
#define FCP_PTA_HEADQ		1	/* head of queue task attribute */
#define FCP_PTA_ORDERED		2	/* ordered task attribute */
#define FCP_PTA_ACA		4	/* auto. contingent allegiance */
#define FCP_PTA_MASK		7	/* mask for task attribute field */
#define FCP_PRI_SHIFT		3	/* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK	0x80	/* reserved bits in priority field */

/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation, at the time when
 * those functions are called:
 *
 *   - Either the context is IRQ and only the IRQ handler can modify HW data,
 *     including rings related fields,
 *
 *   - Or access to target mode variables from struct qla_tgt doesn't
 *     cross those functions' boundaries, except for tgt_stop, which is
 *     additionally protected by irq_cmd_count.
 */
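
/*
 * Usage sketch (editorial, illustrative only, not part of the original
 * code): the task attribute byte of a received FCP_CMND is decoded with
 * the fc_pri_ta defines above, roughly
 *
 *	switch (task_attr & FCP_PTA_MASK) {
 *	case FCP_PTA_HEADQ:   ... head of queue ...
 *	case FCP_PTA_ORDERED: ... ordered ...
 *	default:              ... treat as FCP_PTA_SIMPLE ...
 *	}
 *
 * while the FCP priority would be recovered as (task_attr >> FCP_PRI_SHIFT),
 * FCP_PRI_RESVD_MASK naming the reserved bit of that field.
 */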

/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt);
static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
	int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
	*cmd, struct atio_from_isp *atio, int ha_locked);
static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
	struct qla_tgt_srr_imm *imm, int ha_lock);
/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_cmd_cachep;
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);

/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
static struct qla_tgt_sess *qlt_find_sess_by_port_name(
	struct qla_tgt *tgt,
	const uint8_t *port_name)
{
	struct qla_tgt_sess *sess;

	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
		if (!memcmp(sess->port_name, port_name, WWN_SIZE))
			return sess;
	}

	return NULL;
}

/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);
		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}

static inline
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
	uint8_t *d_id)
{
	struct qla_hw_data *ha = vha->hw;
	uint8_t vp_idx;

	if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
		return NULL;

	if (vha->d_id.b.al_pa == d_id[2])
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}

static inline
struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
	uint16_t vp_idx)
{
	struct qla_hw_data *ha = vha->hw;

	if (vha->vp_idx == vp_idx)
		return vha;

	BUG_ON(ha->tgt.tgt_vp_map == NULL);
	if (likely(test_bit(vp_idx, ha->vp_idx_map)))
		return ha->tgt.tgt_vp_map[vp_idx].vha;

	return NULL;
}
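
/*
 * Editorial note (inferred from the comparisons above, not original text):
 * the d_id[] bytes arrive in wire order, i.e. d_id[0] = domain,
 * d_id[1] = area and d_id[2] = al_pa, which is why they are matched against
 * the corresponding vha->d_id.b.* fields. A minimal lookup, as done in
 * qlt_24xx_atio_pkt_all_vps() below, would be
 *
 *	host = qlt_find_host_by_d_id(vha, atio->u.isp24.fcp_hdr.d_id);
 *	if (!host)
 *		... log and drop the packet for the unknown destination ...
 */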

void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id[0],
			    atio->u.isp24.fcp_hdr.d_id[1],
			    atio->u.isp24.fcp_hdr.d_id[2]);
			break;
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != 0xFFFF)) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received ATIO packet of "
		    "unknown type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return;
}

void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		if (0xFF != entry->u.isp24.vp_index) {
			host = qlt_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RESP_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, pkt);
		break;
	}

	default:
		qlt_response_pkt(vha, pkt);
		break;
	}

}

static void qlt_free_session_done(struct work_struct *work)
{
	struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;

	BUG_ON(!tgt);
	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
	    "Unregistration of sess %p finished\n", sess);

	kfree(sess);
	/*
	 * We need to protect against race, when tgt is freed before or
	 * inside wake_up()
	 */
	tgt->sess_count--;
	if (tgt->sess_count == 0)
		wake_up_all(&tgt->waitQ);
}

/* ha->hardware_lock supposed to be held on entry */
void qlt_unreg_sess(struct qla_tgt_sess *sess)
{
	struct scsi_qla_host *vha = sess->vha;

	vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	list_del(&sess->sess_list_entry);
	if (sess->deleted)
		list_del(&sess->del_list_entry);

	INIT_WORK(&sess->free_work, qlt_free_session_done);
	schedule_work(&sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);

/* ha->hardware_lock supposed to be held on entry */
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	uint32_t unpacked_lun, lun = 0;
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	struct atio_from_isp *a = (struct atio_from_isp *)iocb;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
#if 0 /* FIXME: Re-enable Global event handling.. */
		/* Global event */
		atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
		qlt_clear_tgt_db(ha->tgt.qla_tgt, 1);
		if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
			sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
			    typeof(*sess), sess_list_entry);
			switch (mcmd) {
			case QLA_TGT_NEXUS_LOSS_SESS:
				mcmd = QLA_TGT_NEXUS_LOSS;
				break;
			case QLA_TGT_ABORT_ALL_SESS:
				mcmd = QLA_TGT_ABORT_ALL;
				break;
			case QLA_TGT_NEXUS_LOSS:
			case QLA_TGT_ABORT_ALL:
				break;
			default:
				ql_dbg(ql_dbg_tgt, vha, 0xe046,
				    "qla_target(%d): Not allowed "
				    "command %x in %s", vha->vp_idx,
				    mcmd, __func__);
				sess = NULL;
				break;
			}
		} else
			sess = NULL;
#endif
	} else {
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port "
	    "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, "
	    "mcmd %x, loop_id %d)\n", vha->host_no, sess,
	    sess->port_name[0], sess->port_name[1],
	    sess->port_name[2], sess->port_name[3],
	    sess->port_name[4], sess->port_name[5],
	    sess->port_name[6], sess->port_name[7],
	    mcmd, loop_id);

	lun = a->u.isp24.fcp_cmnd.lun;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
	    iocb, QLA24XX_MGMT_SEND_NACK);
}
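
/*
 * Editorial note on the deletion scheme below (inferred from the code, not
 * part of the original source): a disappearing initiator is not torn down
 * immediately. The session is parked on tgt->del_sess_list with an expiry
 * of port_down_retry_count + 5 seconds, and qlt_del_sess_work_fn() later
 * either cancels the deletion (if the firmware still sees the port) or
 * really shuts the session down. Passing immediate == true skips the grace
 * period by forcing dev_loss_tmo to 0.
 */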

/* ha->hardware_lock supposed to be held on entry */
static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
	bool immediate)
{
	struct qla_tgt *tgt = sess->tgt;
	uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;

	if (sess->deleted)
		return;

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion\n", sess);
	list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
	sess->deleted = 1;

	if (immediate)
		dev_loss_tmo = 0;

	sess->expires = jiffies + dev_loss_tmo * HZ;

	ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
	    "qla_target(%d): session for port %02x:%02x:%02x:"
	    "%02x:%02x:%02x:%02x:%02x (loop ID %d) scheduled for "
	    "deletion in %u secs (expires: %lu) immed: %d\n",
	    sess->vha->vp_idx,
	    sess->port_name[0], sess->port_name[1],
	    sess->port_name[2], sess->port_name[3],
	    sess->port_name[4], sess->port_name[5],
	    sess->port_name[6], sess->port_name[7],
	    sess->loop_id, dev_loss_tmo, sess->expires, immediate);

	if (immediate)
		schedule_delayed_work(&tgt->sess_del_work, 0);
	else
		/* schedule_delayed_work() takes the delay until expiry */
		schedule_delayed_work(&tgt->sess_del_work,
		    sess->expires - jiffies);
}

/* ha->hardware_lock supposed to be held on entry */
static void qlt_clear_tgt_db(struct qla_tgt *tgt, bool local_only)
{
	struct qla_tgt_sess *sess;

	list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
		qlt_schedule_sess_for_deletion(sess, true);

	/* At this point tgt could be already dead */
}

static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list;
	char *id_iter;
	int res, rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA allocation of %u bytes failed\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		res = -1;
		goto out_free_id_list;
	}

	id_iter = (char *)gid_list;
	res = -1;
	for (i = 0; i < entries; i++) {
		struct gid_list_info *gid = (struct gid_list_info *)id_iter;
		if ((gid->al_pa == s_id[2]) &&
		    (gid->area == s_id[1]) &&
		    (gid->domain == s_id[0])) {
			*loop_id = le16_to_cpu(gid->loop_id);
			res = 0;
			break;
		}
		id_iter += ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}
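
/*
 * Editorial note (inferred from the function below, not original text):
 * qlt_check_fcport_exist() snapshots tgt_global_resets_count before talking
 * to the firmware and compares it again afterwards; if a global reset
 * slipped in between, the whole node-name-list / port-database lookup is
 * restarted, so the result is never based on a stale, pre-reset view of
 * the fabric.
 */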

static bool qlt_check_fcport_exist(struct scsi_qla_host *vha,
	struct qla_tgt_sess *sess)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_port_24xx_data *pmap24;
	bool res, found = false;
	int rc, i;
	uint16_t loop_id = 0xFFFF; /* to eliminate compiler's warning */
	uint16_t entries;
	void *pmap;
	int pmap_len;
	fc_port_t *fcport;
	int global_resets;
	unsigned long flags;

retry:
	global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);

	rc = qla2x00_get_node_name_list(vha, &pmap, &pmap_len);
	if (rc != QLA_SUCCESS) {
		res = false;
		goto out;
	}

	pmap24 = pmap;
	entries = pmap_len / sizeof(*pmap24);

	for (i = 0; i < entries; ++i) {
		if (!memcmp(sess->port_name, pmap24[i].port_name, WWN_SIZE)) {
			loop_id = le16_to_cpu(pmap24[i].loop_id);
			found = true;
			break;
		}
	}

	kfree(pmap);

	if (!found) {
		res = false;
		goto out;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf046,
	    "qlt_check_fcport_exist(): loop_id %d", loop_id);

	fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
	if (fcport == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf047,
		    "qla_target(%d): Allocation of tmp FC port failed",
		    vha->vp_idx);
		res = false;
		goto out;
	}

	fcport->loop_id = loop_id;

	rc = qla2x00_get_port_database(vha, fcport, 0);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf048,
		    "qla_target(%d): Failed to retrieve fcport "
		    "information -- get_port_database() returned %x "
		    "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
		res = false;
		goto out_free_fcport;
	}

	if (global_resets !=
	    atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
		    "qla_target(%d): global reset during session discovery "
		    "(counter was %d, new %d), retrying",
		    vha->vp_idx, global_resets,
		    atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
		kfree(fcport);	/* don't leak the temporary port across retry */
		goto retry;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
	    "Updating sess %p (s_id %x:%x:%x, loop_id %d) to d_id %x:%x:%x, "
	    "loop_id %d", sess, sess->s_id.b.domain, sess->s_id.b.al_pa,
	    sess->s_id.b.area, sess->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.al_pa, fcport->d_id.b.area, fcport->loop_id);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
	    (fcport->flags & FCF_CONF_COMP_SUPPORTED));
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	res = true;

out_free_fcport:
	kfree(fcport);

out:
	return res;
}

/* ha->hardware_lock supposed to be held on entry */
static void qlt_undelete_sess(struct qla_tgt_sess *sess)
{
	BUG_ON(!sess->deleted);

	list_del(&sess->del_list_entry);
	sess->deleted = 0;
}

static void qlt_del_sess_work_fn(struct delayed_work *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt,
	    sess_del_work);
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (!list_empty(&tgt->del_sess_list)) {
		sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
		    del_list_entry);
		if (time_after_eq(jiffies, sess->expires)) {
			bool cancel;

			qlt_undelete_sess(sess);

			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			cancel = qlt_check_fcport_exist(vha, sess);

			if (cancel) {
				if (sess->deleted) {
					/*
					 * sess was again deleted while we were
					 * discovering it
					 */
					spin_lock_irqsave(&ha->hardware_lock,
					    flags);
					continue;
				}

				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf049,
				    "qla_target(%d): cancel deletion of "
				    "session for port %02x:%02x:%02x:%02x:%02x:"
				    "%02x:%02x:%02x (loop ID %d), because "
				    "it isn't deleted by firmware",
				    vha->vp_idx, sess->port_name[0],
				    sess->port_name[1], sess->port_name[2],
				    sess->port_name[3], sess->port_name[4],
				    sess->port_name[5], sess->port_name[6],
				    sess->port_name[7], sess->loop_id);
			} else {
				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
				    "Timeout: sess %p about to be deleted\n",
				    sess);
				ha->tgt.tgt_ops->shutdown_sess(sess);
				ha->tgt.tgt_ops->put_sess(sess);
			}

			spin_lock_irqsave(&ha->hardware_lock, flags);
		} else {
			/* re-arm for the remaining time until expiry */
			schedule_delayed_work(&tgt->sess_del_work,
			    sess->expires - jiffies);
			break;
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
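
/*
 * Illustrative call pattern for the function below (editorial, matching the
 * caller qlt_fc_port_added() later in this file):
 *
 *	mutex_lock(&ha->tgt.tgt_mutex);
 *	sess = qlt_create_sess(vha, fcport, false);
 *	mutex_unlock(&ha->tgt.tgt_mutex);
 *	...
 *	ha->tgt.tgt_ops->put_sess(sess);	- drops the extra kref
 */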

/*
 * Adds an extra ref to allow to drop hw lock after adding sess to the list.
 * Caller must put it.
 */
static struct qla_tgt_sess *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	unsigned long flags;
	unsigned char be_sid[3];

	/* Check to avoid double sessions */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	list_for_each_entry(sess, &ha->tgt.qla_tgt->sess_list,
	    sess_list_entry) {
		if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
			    "Double sess %p found (s_id %x:%x:%x, "
			    "loop_id %d), updating to d_id %x:%x:%x, "
			    "loop_id %d", sess, sess->s_id.b.domain,
			    sess->s_id.b.al_pa, sess->s_id.b.area,
			    sess->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.al_pa, fcport->d_id.b.area,
			    fcport->loop_id);

			if (sess->deleted)
				qlt_undelete_sess(sess);

			kref_get(&sess->se_sess->sess_kref);
			ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
			    fcport->loop_id,
			    (fcport->flags & FCF_CONF_COMP_SUPPORTED));

			if (sess->local && !local)
				sess->local = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

			return sess;
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
		    "qla_target(%u): session allocation failed, "
		    "all commands from port %02x:%02x:%02x:%02x:"
		    "%02x:%02x:%02x:%02x will be refused", vha->vp_idx,
		    fcport->port_name[0], fcport->port_name[1],
		    fcport->port_name[2], fcport->port_name[3],
		    fcport->port_name[4], fcport->port_name[5],
		    fcport->port_name[6], fcport->port_name[7]);

		return NULL;
	}
	sess->tgt = ha->tgt.qla_tgt;
	sess->vha = vha;
	sess->s_id = fcport->d_id;
	sess->loop_id = fcport->loop_id;
	sess->local = local;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
	    sess, ha->tgt.qla_tgt);

	be_sid[0] = sess->s_id.b.domain;
	be_sid[1] = sess->s_id.b.area;
	be_sid[2] = sess->s_id.b.al_pa;
	/*
	 * Determine if this fc_port->port_name is allowed to access
	 * target mode using explicit NodeACLs+MappedLUNs, or using
	 * TPG demo mode.  If this is successful a target mode FC nexus
	 * is created.
	 */
	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
		kfree(sess);
		return NULL;
	}
	/*
	 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
	 * access across ->hardware_lock reacquire.
	 */
	kref_get(&sess->se_sess->sess_kref);

	sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
	BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
	memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));

	spin_lock_irqsave(&ha->hardware_lock, flags);
	list_add_tail(&sess->sess_list_entry, &ha->tgt.qla_tgt->sess_list);
	ha->tgt.qla_tgt->sess_count++;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %02x:%02x:%02x:%02x:"
	    "%02x:%02x:%02x:%02x (loop_id %d, s_id %x:%x:%x, confirmed "
	    "completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name[0],
	    fcport->port_name[1], fcport->port_name[2], fcport->port_name[3],
	    fcport->port_name[4], fcport->port_name[5], fcport->port_name[6],
	    fcport->port_name[7], fcport->loop_id, sess->s_id.b.domain,
	    sess->s_id.b.area, sess->s_id.b.al_pa, sess->conf_compl_supported ?
	    "" : "not ");

	return sess;
}
"local " : "", fcport->port_name[0], 813 fcport->port_name[1], fcport->port_name[2], fcport->port_name[3], 814 fcport->port_name[4], fcport->port_name[5], fcport->port_name[6], 815 fcport->port_name[7], fcport->loop_id, sess->s_id.b.domain, 816 sess->s_id.b.area, sess->s_id.b.al_pa, sess->conf_compl_supported ? 817 "" : "not "); 818 819 return sess; 820 } 821 822 /* 823 * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port() 824 */ 825 void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) 826 { 827 struct qla_hw_data *ha = vha->hw; 828 struct qla_tgt *tgt = ha->tgt.qla_tgt; 829 struct qla_tgt_sess *sess; 830 unsigned long flags; 831 832 if (!vha->hw->tgt.tgt_ops) 833 return; 834 835 if (!tgt || (fcport->port_type != FCT_INITIATOR)) 836 return; 837 838 spin_lock_irqsave(&ha->hardware_lock, flags); 839 if (tgt->tgt_stop) { 840 spin_unlock_irqrestore(&ha->hardware_lock, flags); 841 return; 842 } 843 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); 844 if (!sess) { 845 spin_unlock_irqrestore(&ha->hardware_lock, flags); 846 847 mutex_lock(&ha->tgt.tgt_mutex); 848 sess = qlt_create_sess(vha, fcport, false); 849 mutex_unlock(&ha->tgt.tgt_mutex); 850 851 spin_lock_irqsave(&ha->hardware_lock, flags); 852 } else { 853 kref_get(&sess->se_sess->sess_kref); 854 855 if (sess->deleted) { 856 qlt_undelete_sess(sess); 857 858 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c, 859 "qla_target(%u): %ssession for port %02x:" 860 "%02x:%02x:%02x:%02x:%02x:%02x:%02x (loop ID %d) " 861 "reappeared\n", vha->vp_idx, sess->local ? "local " 862 : "", sess->port_name[0], sess->port_name[1], 863 sess->port_name[2], sess->port_name[3], 864 sess->port_name[4], sess->port_name[5], 865 sess->port_name[6], sess->port_name[7], 866 sess->loop_id); 867 868 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007, 869 "Reappeared sess %p\n", sess); 870 } 871 ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, 872 (fcport->flags & FCF_CONF_COMP_SUPPORTED)); 873 } 874 875 if (sess && sess->local) { 876 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d, 877 "qla_target(%u): local session for " 878 "port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " 879 "(loop ID %d) became global\n", vha->vp_idx, 880 fcport->port_name[0], fcport->port_name[1], 881 fcport->port_name[2], fcport->port_name[3], 882 fcport->port_name[4], fcport->port_name[5], 883 fcport->port_name[6], fcport->port_name[7], 884 sess->loop_id); 885 sess->local = 0; 886 } 887 spin_unlock_irqrestore(&ha->hardware_lock, flags); 888 889 ha->tgt.tgt_ops->put_sess(sess); 890 } 891 892 void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport) 893 { 894 struct qla_hw_data *ha = vha->hw; 895 struct qla_tgt *tgt = ha->tgt.qla_tgt; 896 struct qla_tgt_sess *sess; 897 unsigned long flags; 898 899 if (!vha->hw->tgt.tgt_ops) 900 return; 901 902 if (!tgt || (fcport->port_type != FCT_INITIATOR)) 903 return; 904 905 spin_lock_irqsave(&ha->hardware_lock, flags); 906 if (tgt->tgt_stop) { 907 spin_unlock_irqrestore(&ha->hardware_lock, flags); 908 return; 909 } 910 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); 911 if (!sess) { 912 spin_unlock_irqrestore(&ha->hardware_lock, flags); 913 return; 914 } 915 916 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess); 917 918 sess->local = 1; 919 qlt_schedule_sess_for_deletion(sess, false); 920 spin_unlock_irqrestore(&ha->hardware_lock, flags); 921 } 922 923 static inline int test_tgt_sess_count(struct qla_tgt *tgt) 924 { 925 struct qla_hw_data *ha = tgt->ha; 926 unsigned long flags; 927 int res; 928 /* 929 

static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;
	int res;
	/*
	 * We need to protect against race, when tgt is freed before or
	 * inside wake_up()
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
	    "tgt %p, empty(sess_list)=%d sess_count=%d\n",
	    tgt, list_empty(&tgt->sess_list), tgt->sess_count);
	res = (tgt->sess_count == 0);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return res;
}

/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		dump_stack();
		return;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
	mutex_lock(&ha->tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt, true);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&ha->tgt.tgt_mutex);

	flush_delayed_work(&tgt->sess_del_work);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_scheduled_work();
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: list_empty(sess_list)=%d "
	    "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
	    tgt->sess_count);

	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));

	/* Big hammer */
	if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
}
EXPORT_SYMBOL(qlt_stop_phase1);

/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00b,
	    "Waiting for %d IRQ commands to complete (tgt %p)",
	    tgt->irq_cmd_count, tgt);

	mutex_lock(&ha->tgt.tgt_mutex);
	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (tgt->irq_cmd_count != 0) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		udelay(2);
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	mutex_unlock(&ha->tgt.tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00c, "Stop of tgt %p finished",
	    tgt);
}
EXPORT_SYMBOL(qlt_stop_phase2);
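
/*
 * Editorial summary (inferred from the two functions above, not original
 * text): target shutdown is split in two. Phase 1 marks tgt_stop, schedules
 * every session for immediate deletion and blocks until sess_count reaches
 * zero, disabling the vha as a "big hammer" if target mode is still on.
 * Phase 2 then busy-waits (udelay under hardware_lock) until irq_cmd_count
 * shows no IRQ-context command is still inside the target code, and only
 * then sets tgt_stopped. qlt_release() below relies on phase 2 having run.
 */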

/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;

	if ((ha->tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	ha->tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;

	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_send_notify_ack(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (ha->tgt.qla_tgt != NULL)
		ha->tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
		    __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	qla2x00_start_iocbs(vha, vha->req);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	uint32_t f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x)\n",
	    ha, abts, status);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	if (ids_reversed) {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
	} else {
		resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
		resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
		resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
		resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
		resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
		resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
		    BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	ha->tgt.qla_tgt->abts_resp_expected++;

	qla2x00_start_iocbs(vha, vha->req);
}
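
/*
 * Editorial note (inferred from the code above, not original text): for a
 * normal reply the s_id/d_id of the incoming ABTS are swapped so the
 * response travels back to the initiator. ids_reversed == true is used by
 * qlt_24xx_retry_term_exchange() below, where the "received" packet is
 * already a firmware echo of our own earlier response and thus carries the
 * IDs pre-reversed. FCP_TMF_CMPL selects a BA_ACC payload; anything else
 * becomes a BA_RJT.
 */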

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct abts_resp_from_24xx_fw *entry)
{
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
	/* Send marker if required */
	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	/*
	 * On entry we have the firmware's response to an ABTS response that
	 * we generated earlier, so the ID fields in it are already reversed.
	 */

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
	ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
	ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	ctio->u.status1.flags =
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
	    CTIO7_FLAGS_TERMINATE);
	ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;

	qla2x00_start_iocbs(vha, vha->req);

	qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
	    FCP_TMF_CMPL, true);
}

/* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
{
	struct qla_hw_data *ha = vha->hw;
	struct se_session *se_sess = sess->se_sess;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct se_cmd *se_cmd;
	u32 lun = 0;
	int rc;
	bool found_lun = false;

	spin_lock(&se_sess->sess_cmd_lock);
	list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
		struct qla_tgt_cmd *cmd =
		    container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
		if (cmd->tag == abts->exchange_addr_to_abort) {
			lun = cmd->unpacked_lun;
			found_lun = true;
			break;
		}
	}
	spin_unlock(&se_sess->sess_cmd_lock);

	if (!found_lun)
		return -ENOENT;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
	    "qla_target(%d): task abort (tag=%d)\n",
	    vha->vp_idx, abts->exchange_addr_to_abort);

	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));

	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
	    abts->exchange_addr_to_abort);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
		    "qla_target(%d): tgt_ops->handle_tmr() failed: %d",
		    vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EFAULT;
	}

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	uint32_t tag = abts->exchange_addr_to_abort;
	uint8_t s_id[3];
	int rc;

	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
		    "qla_target(%d): ABTS: Abort Sequence not "
		    "supported\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
		    "qla_target(%d): ABTS: Unknown Exchange "
		    "Address received\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
	    abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
	    le32_to_cpu(abts->fcp_hdr_le.parameter));

	s_id[0] = abts->fcp_hdr_le.s_id[2];
	s_id[1] = abts->fcp_hdr_le.s_id[1];
	s_id[2] = abts->fcp_hdr_le.s_id[0];

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
		    "qla_target(%d): task abort for non-existent session\n",
		    vha->vp_idx);
		rc = qlt_sched_sess_work(ha->tgt.qla_tgt,
		    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
		if (rc != 0) {
			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
			    false);
		}
		return;
	}

	rc = __qlt_24xx_handle_abts(vha, abts, sess);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
		    vha->vp_idx, rc);
		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
		return;
	}
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
	struct ctio7_to_24xx *ctio;

	ql_dbg(ql_dbg_tgt, ha, 0xe008,
	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x)\n",
	    ha, atio, resp_code);

	/* Send marker if required */
	if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
		return;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, ha, 0xe04c,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", ha->vp_idx, __func__);
		return;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->nport_handle = mcmd->sess->loop_id;
	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = ha->vp_idx;
	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
	    CTIO7_FLAGS_SEND_STATUS);
	ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.scsi_status =
	    __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
	ctio->u.status1.response_len = __constant_cpu_to_le16(8);
	ctio->u.status1.sense_data[0] = resp_code;

	qla2x00_start_iocbs(ha, ha->req);
}

void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);
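
/*
 * Editorial sketch of the task-management completion path (assembled from
 * the code and comments below, not original text):
 *
 *	target core completes the TMR
 *	  -> TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp()
 *	    -> qlt_xmit_tm_rsp()
 *	         - NACK for IMMED_NOTIFY based TMs
 *	         - ABTS response for TMR_ABORT_TASK
 *	         - CTIO7 with FCP_RSP info for everything else
 *	    -> ha->tgt.tgt_ops->free_mcmd() queues the final release
 */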

/* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
	    "TM response mcmd (%p) status %#x state %#x",
	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
		qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
		    0, 0, 0, 0, 0, 0);
	else {
		if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
			qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
			    mcmd->fc_tm_rsp, false);
		else
			qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
			    mcmd->fc_tm_rsp);
	}
	/*
	 * Make the callback for ->free_mcmd() to queue_work() and invoke
	 * target_put_sess_cmd() to drop cmd_kref to 1. The final
	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
	 * qlt_xmit_tm_rsp() returns here.
	 */
	ha->tgt.tgt_ops->free_mcmd(mcmd);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);

/* No locks */
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd = prm->cmd;

	BUG_ON(cmd->sg_cnt == 0);

	prm->sg = (struct scatterlist *)cmd->sg;
	prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
	    cmd->sg_cnt, cmd->dma_data_direction);
	if (unlikely(prm->seg_cnt == 0))
		goto out_err;

	prm->cmd->sg_mapped = 1;

	/*
	 * If greater than four sg entries then we need to allocate
	 * the continuation entries
	 */
	if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
		prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
		    prm->tgt->datasegs_per_cmd, prm->tgt->datasegs_per_cont);

	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
	    prm->seg_cnt, prm->req_cnt);
	return 0;

out_err:
	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
	    0, prm->cmd->sg_cnt);
	return -1;
}

static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
	struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha = vha->hw;

	BUG_ON(!cmd->sg_mapped);
	pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
	cmd->sg_mapped = 0;
}

static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
	uint32_t req_cnt)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ha->iobase;
	uint32_t cnt;

	if (vha->req->cnt < (req_cnt + 2)) {
		cnt = (uint16_t)RD_REG_DWORD(&reg->isp24.req_q_out);

		ql_dbg(ql_dbg_tgt, vha, 0xe00a,
		    "Request ring circled: cnt=%d, vha->req->ring_index=%d, "
		    "vha->req->cnt=%d, req_cnt=%d\n", cnt,
		    vha->req->ring_index, vha->req->cnt, req_cnt);
		if (vha->req->ring_index < cnt)
			vha->req->cnt = cnt - vha->req->ring_index;
		else
			vha->req->cnt = vha->req->length -
			    (vha->req->ring_index - cnt);
	}

	if (unlikely(vha->req->cnt < (req_cnt + 2))) {
		ql_dbg(ql_dbg_tgt, vha, 0xe00b,
		    "qla_target(%d): There is no room in the "
		    "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, "
		    "req_cnt=%d\n", vha->vp_idx, vha->req->ring_index,
		    vha->req->cnt, req_cnt);
		return -EAGAIN;
	}
	vha->req->cnt -= req_cnt;

	return 0;
}
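
/*
 * Worked example for the free-slot computation above (editorial, assuming a
 * ring of length 1024): if ring_index == 1000 and the hardware consumer
 * index read from req_q_out is 10, the producer has wrapped, so the free
 * space is length - (ring_index - cnt) = 1024 - 990 = 34 entries; if
 * instead cnt == 1010, the free space is simply cnt - ring_index = 10.
 * The "+ 2" keeps a safety gap so the producer never fully catches up with
 * the consumer.
 */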

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
{
	/* Adjust ring index. */
	vha->req->ring_index++;
	if (vha->req->ring_index == vha->req->length) {
		vha->req->ring_index = 0;
		vha->req->ring_ptr = vha->req->ring;
	} else {
		vha->req->ring_ptr++;
	}
	return (cont_entry_t *)vha->req->ring_ptr;
}

/* ha->hardware_lock supposed to be held on entry */
static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t h;

	h = ha->tgt.current_handle;
	/* always increment cmd handle */
	do {
		++h;
		if (h > DEFAULT_OUTSTANDING_COMMANDS)
			h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
		if (h == ha->tgt.current_handle) {
			ql_dbg(ql_dbg_tgt, vha, 0xe04e,
			    "qla_target(%d): Ran out of "
			    "empty cmd slots in ha %p\n", vha->vp_idx, ha);
			h = QLA_TGT_NULL_HANDLE;
			break;
		}
	} while ((h == QLA_TGT_NULL_HANDLE) ||
	    (h == QLA_TGT_SKIP_HANDLE) ||
	    (ha->tgt.cmds[h-1] != NULL));

	if (h != QLA_TGT_NULL_HANDLE)
		ha->tgt.current_handle = h;

	return h;
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	uint32_t h;
	struct ctio7_to_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *atio = &prm->cmd->atio;

	pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	pkt->entry_type = CTIO_TYPE7;
	pkt->entry_count = (uint8_t)prm->req_cnt;
	pkt->vp_index = vha->vp_idx;

	h = qlt_make_handle(vha);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		ha->tgt.cmds[h-1] = prm->cmd;

	pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = prm->cmd->loop_id;
	pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
	pkt->exchange_addr = atio->u.isp24.exchange_addr;
	pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
	pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

	ql_dbg(ql_dbg_tgt, vha, 0xe00c,
	    "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
	    vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT,
	    le16_to_cpu(pkt->u.status0.ox_id));
	return 0;
}
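
/*
 * Editorial note on the handle scheme (inferred from qlt_make_handle() and
 * qlt_24xx_build_ctio_pkt() above): handles are 1-based slots into
 * ha->tgt.cmds[], scanned round-robin; 0 is reserved as QLA_TGT_NULL_HANDLE
 * and QLA_TGT_SKIP_HANDLE is stepped over. The CTIO_COMPLETION_HANDLE_MARK
 * bit tags the value written into pkt->handle, presumably so the completion
 * path can recognize a target-mode handle and map it back to the
 * corresponding qla_tgt_cmd.
 */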

/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 */
static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;

	/* Build continuation packets */
	while (prm->seg_cnt > 0) {
		cont_a64_entry_t *cont_pkt64 =
		    (cont_a64_entry_t *)qlt_get_req_pkt(vha);

		/*
		 * Make sure that none of cont_pkt64's 64-bit specific
		 * fields are used when doing 32-bit addressing; cast to
		 * (cont_entry_t *) for that.
		 */

		memset(cont_pkt64, 0, sizeof(*cont_pkt64));

		cont_pkt64->entry_count = 1;
		cont_pkt64->sys_define = 0;

		if (enable_64bit_addressing) {
			cont_pkt64->entry_type = CONTINUE_A64_TYPE;
			dword_ptr =
			    (uint32_t *)&cont_pkt64->dseg_0_address;
		} else {
			cont_pkt64->entry_type = CONTINUE_TYPE;
			dword_ptr =
			    (uint32_t *)&((cont_entry_t *)
			    cont_pkt64)->dseg_0_address;
		}

		/* Load continuation entry data segments */
		for (cnt = 0;
		    cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
		    cnt++, prm->seg_cnt--) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_lo32
			    (sg_dma_address(prm->sg)));
			if (enable_64bit_addressing) {
				*dword_ptr++ =
				    cpu_to_le32(pci_dma_hi32
				    (sg_dma_address
				    (prm->sg)));
			}
			*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

			ql_dbg(ql_dbg_tgt, vha, 0xe00d,
			    "S/G Segment Cont. phys_addr=%llx:%llx, len=%d\n",
			    (long long unsigned int)
			    pci_dma_hi32(sg_dma_address(prm->sg)),
			    (long long unsigned int)
			    pci_dma_lo32(sg_dma_address(prm->sg)),
			    (int)sg_dma_len(prm->sg));

			prm->sg = sg_next(prm->sg);
		}
	}
}

/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 */
static void qlt_load_data_segments(struct qla_tgt_prm *prm,
	struct scsi_qla_host *vha)
{
	int cnt;
	uint32_t *dword_ptr;
	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;

	ql_dbg(ql_dbg_tgt, vha, 0xe00e,
	    "iocb->scsi_status=%x, iocb->flags=%x\n",
	    le16_to_cpu(pkt24->u.status0.scsi_status),
	    le16_to_cpu(pkt24->u.status0.flags));

	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);

	/* Setup packet address segment pointer */
	dword_ptr = pkt24->u.status0.dseg_0_address;

	/* Set total data segment count */
	if (prm->seg_cnt)
		pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);

	if (prm->seg_cnt == 0) {
		/* No data transfer */
		*dword_ptr++ = 0;
		*dword_ptr = 0;
		return;
	}

	/* If scatter gather */
	ql_dbg(ql_dbg_tgt, vha, 0xe00f, "%s", "Building S/G data segments...");

	/* Load command entry data segments */
	for (cnt = 0;
	    (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
	    cnt++, prm->seg_cnt--) {
		*dword_ptr++ =
		    cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
		if (enable_64bit_addressing) {
			*dword_ptr++ =
			    cpu_to_le32(pci_dma_hi32(
			    sg_dma_address(prm->sg)));
		}
		*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

		ql_dbg(ql_dbg_tgt, vha, 0xe010,
		    "S/G Segment phys_addr=%llx:%llx, len=%d\n",
		    (long long unsigned int)pci_dma_hi32(sg_dma_address(
		    prm->sg)),
		    (long long unsigned int)pci_dma_lo32(sg_dma_address(
		    prm->sg)),
		    (int)sg_dma_len(prm->sg));

		prm->sg = sg_next(prm->sg);
	}

	qlt_load_cont_data_segments(prm, vha);
}

static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
{
	return cmd->bufflen > 0;
}
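
/*
 * Editorial sketch of the descriptor layout written by the two loaders
 * above (inferred from the dword stores, not original text): each data
 * segment is emitted as little-endian dwords through dword_ptr,
 *
 *	32-bit mode:  [ address_lo ][ length ]
 *	64-bit mode:  [ address_lo ][ address_hi ][ length ]
 *
 * with the first prm->tgt->datasegs_per_cmd segments living in the CTIO7
 * itself and the remainder spilling into CONTINUE(_A64)_TYPE packets,
 * datasegs_per_cont at a time.
 */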

/*
 * Called without ha->hardware_lock held
 */
static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
	struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
	uint32_t *full_req_cnt)
{
	struct qla_tgt *tgt = cmd->tgt;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct se_cmd *se_cmd = &cmd->se_cmd;

	if (unlikely(cmd->aborted)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
		    "qla_target(%d): terminating exchange "
		    "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd,
		    se_cmd, cmd->tag);

		cmd->state = QLA_TGT_STATE_ABORTED;

		qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);

		/* !! At this point cmd could be already freed !! */
		return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u\n",
	    vha->vp_idx, cmd->tag);

	prm->cmd = cmd;
	prm->tgt = tgt;
	prm->rq_result = scsi_status;
	prm->sense_buffer = &cmd->sense_buffer[0];
	prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
	prm->sg = NULL;
	prm->seg_cnt = -1;
	prm->req_cnt = 1;
	prm->add_status_pkt = 0;

	ql_dbg(ql_dbg_tgt, vha, 0xe012, "rq_result=%x, xmit_type=%x\n",
	    prm->rq_result, xmit_type);

	/* Send marker if required */
	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
		return -EFAULT;

	ql_dbg(ql_dbg_tgt, vha, 0xe013, "CTIO start: vha(%d)\n", vha->vp_idx);

	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
		if (qlt_pci_map_calc_cnt(prm) != 0)
			return -EAGAIN;
	}

	*full_req_cnt = prm->req_cnt;

	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		prm->residual = se_cmd->residual_count;
		ql_dbg(ql_dbg_tgt, vha, 0xe014,
		    "Residual underflow: %d (tag %d, "
		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
		    cmd->bufflen, prm->rq_result);
		prm->rq_result |= SS_RESIDUAL_UNDER;
se_cmd->t_task_cdb[0] : 0, 1843 cmd->bufflen, prm->rq_result); 1844 prm->rq_result |= SS_RESIDUAL_OVER; 1845 } 1846 1847 if (xmit_type & QLA_TGT_XMIT_STATUS) { 1848 /* 1849 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be 1850 * ignored in *xmit_response() below 1851 */ 1852 if (qlt_has_data(cmd)) { 1853 if (QLA_TGT_SENSE_VALID(prm->sense_buffer) || 1854 (IS_FWI2_CAPABLE(ha) && 1855 (prm->rq_result != 0))) { 1856 prm->add_status_pkt = 1; 1857 (*full_req_cnt)++; 1858 } 1859 } 1860 } 1861 1862 ql_dbg(ql_dbg_tgt, vha, 0xe016, 1863 "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n", 1864 prm->req_cnt, *full_req_cnt, prm->add_status_pkt); 1865 1866 return 0; 1867 } 1868 1869 static inline int qlt_need_explicit_conf(struct qla_hw_data *ha, 1870 struct qla_tgt_cmd *cmd, int sending_sense) 1871 { 1872 if (ha->tgt.enable_class_2) 1873 return 0; 1874 1875 if (sending_sense) 1876 return cmd->conf_compl_supported; 1877 else 1878 return ha->tgt.enable_explicit_conf && 1879 cmd->conf_compl_supported; 1880 } 1881 1882 #ifdef CONFIG_QLA_TGT_DEBUG_SRR 1883 /* 1884 * Original taken from the XFS code 1885 */ 1886 static unsigned long qlt_srr_random(void) 1887 { 1888 static int Inited; 1889 static unsigned long RandomValue; 1890 static DEFINE_SPINLOCK(lock); 1891 /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */ 1892 register long rv; 1893 register long lo; 1894 register long hi; 1895 unsigned long flags; 1896 1897 spin_lock_irqsave(&lock, flags); 1898 if (!Inited) { 1899 RandomValue = jiffies; 1900 Inited = 1; 1901 } 1902 rv = RandomValue; 1903 hi = rv / 127773; 1904 lo = rv % 127773; 1905 rv = 16807 * lo - 2836 * hi; 1906 if (rv <= 0) 1907 rv += 2147483647; 1908 RandomValue = rv; 1909 spin_unlock_irqrestore(&lock, flags); 1910 return rv; 1911 } 1912 1913 static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type) 1914 { 1915 #if 0 /* This is not a real status packets lost, so it won't lead to SRR */ 1916 if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200) 1917 == 50) { 1918 *xmit_type &= ~QLA_TGT_XMIT_STATUS; 1919 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015, 1920 "Dropping cmd %p (tag %d) status", cmd, cmd->tag); 1921 } 1922 #endif 1923 /* 1924 * It's currently not possible to simulate SRRs for FCP_WRITE without 1925 * a physical link layer failure, so don't even try here.. 
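* (For FCP READ, the code below provokes SRRs instead: the first branch randomly truncates the tail of the S/G list, the second randomly cuts the buffer head via qlt_set_data_offset().)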
1926 */ 1927 if (cmd->dma_data_direction != DMA_FROM_DEVICE) 1928 return; 1929 1930 if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) && 1931 ((qlt_srr_random() % 100) == 20)) { 1932 int i, leave = 0; 1933 unsigned int tot_len = 0; 1934 1935 while (leave == 0) 1936 leave = qlt_srr_random() % cmd->sg_cnt; 1937 1938 for (i = 0; i < leave; i++) 1939 tot_len += cmd->sg[i].length; 1940 1941 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016, 1942 "Cutting cmd %p (tag %d) buffer" 1943 " tail to len %d, sg_cnt %d (cmd->bufflen %d," 1944 " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave, 1945 cmd->bufflen, cmd->sg_cnt); 1946 1947 cmd->bufflen = tot_len; 1948 cmd->sg_cnt = leave; 1949 } 1950 1951 if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) { 1952 unsigned int offset = qlt_srr_random() % cmd->bufflen; 1953 1954 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017, 1955 "Cutting cmd %p (tag %d) buffer head " 1956 "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset, 1957 cmd->bufflen); 1958 if (offset == 0) 1959 *xmit_type &= ~QLA_TGT_XMIT_DATA; 1960 else if (qlt_set_data_offset(cmd, offset)) { 1961 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018, 1962 "qlt_set_data_offset() failed (tag %d)", cmd->tag); 1963 } 1964 } 1965 } 1966 #else 1967 static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type) 1968 {} 1969 #endif 1970 1971 static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio, 1972 struct qla_tgt_prm *prm) 1973 { 1974 prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len, 1975 (uint32_t)sizeof(ctio->u.status1.sense_data)); 1976 ctio->u.status0.flags |= 1977 __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS); 1978 if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) { 1979 ctio->u.status0.flags |= __constant_cpu_to_le16( 1980 CTIO7_FLAGS_EXPLICIT_CONFORM | 1981 CTIO7_FLAGS_CONFORM_REQ); 1982 } 1983 ctio->u.status0.residual = cpu_to_le32(prm->residual); 1984 ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result); 1985 if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) { 1986 int i; 1987 1988 if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) { 1989 if (prm->cmd->se_cmd.scsi_status != 0) { 1990 ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017, 1991 "Skipping EXPLICIT_CONFORM and " 1992 "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ " 1993 "non GOOD status\n"); 1994 goto skip_explict_conf; 1995 } 1996 ctio->u.status1.flags |= __constant_cpu_to_le16( 1997 CTIO7_FLAGS_EXPLICIT_CONFORM | 1998 CTIO7_FLAGS_CONFORM_REQ); 1999 } 2000 skip_explict_conf: 2001 ctio->u.status1.flags &= 2002 ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); 2003 ctio->u.status1.flags |= 2004 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1); 2005 ctio->u.status1.scsi_status |= 2006 __constant_cpu_to_le16(SS_SENSE_LEN_VALID); 2007 ctio->u.status1.sense_length = 2008 cpu_to_le16(prm->sense_buffer_len); 2009 for (i = 0; i < prm->sense_buffer_len/4; i++) 2010 ((uint32_t *)ctio->u.status1.sense_data)[i] = 2011 cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]); 2012 #if 0 2013 if (unlikely((prm->sense_buffer_len % 4) != 0)) { 2014 static int q; 2015 if (q < 10) { 2016 ql_dbg(ql_dbg_tgt, vha, 0xe04f, 2017 "qla_target(%d): %d bytes of sense " 2018 "lost", prm->tgt->ha->vp_idx, 2019 prm->sense_buffer_len % 4); 2020 q++; 2021 } 2022 } 2023 #endif 2024 } else { 2025 ctio->u.status1.flags &= 2026 ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); 2027 ctio->u.status1.flags |= 2028 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1); 2029 ctio->u.status1.sense_length = 0; 2030 memset(ctio->u.status1.sense_data, 0, 2031 
sizeof(ctio->u.status1.sense_data)); 2032 } 2033 2034 /* Sense with len > 24, is it possible? */ 2035 } 2036 2037 /* 2038 * Callback to set up a response of xmit_type QLA_TGT_XMIT_DATA and 2039 * QLA_TGT_XMIT_STATUS for >= 24xx silicon 2040 */ 2041 int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, 2042 uint8_t scsi_status) 2043 { 2044 struct scsi_qla_host *vha = cmd->vha; 2045 struct qla_hw_data *ha = vha->hw; 2046 struct ctio7_to_24xx *pkt; 2047 struct qla_tgt_prm prm; 2048 uint32_t full_req_cnt = 0; 2049 unsigned long flags = 0; 2050 int res; 2051 2052 memset(&prm, 0, sizeof(prm)); 2053 qlt_check_srr_debug(cmd, &xmit_type); 2054 2055 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018, 2056 "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, " 2057 "cmd->dma_data_direction=%d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ? 2058 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction); 2059 2060 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, 2061 &full_req_cnt); 2062 if (unlikely(res != 0)) { 2063 if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED) 2064 return 0; 2065 2066 return res; 2067 } 2068 2069 spin_lock_irqsave(&ha->hardware_lock, flags); 2070 2071 /* Does F/W have enough IOCBs for this request? */ 2072 res = qlt_check_reserve_free_req(vha, full_req_cnt); 2073 if (unlikely(res)) 2074 goto out_unmap_unlock; 2075 2076 res = qlt_24xx_build_ctio_pkt(&prm, vha); 2077 if (unlikely(res != 0)) 2078 goto out_unmap_unlock; 2079 2080 2081 pkt = (struct ctio7_to_24xx *)prm.pkt; 2082 2083 if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) { 2084 pkt->u.status0.flags |= 2085 __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN | 2086 CTIO7_FLAGS_STATUS_MODE_0); 2087 2088 qlt_load_data_segments(&prm, vha); 2089 2090 if (prm.add_status_pkt == 0) { 2091 if (xmit_type & QLA_TGT_XMIT_STATUS) { 2092 pkt->u.status0.scsi_status = 2093 cpu_to_le16(prm.rq_result); 2094 pkt->u.status0.residual = 2095 cpu_to_le32(prm.residual); 2096 pkt->u.status0.flags |= __constant_cpu_to_le16( 2097 CTIO7_FLAGS_SEND_STATUS); 2098 if (qlt_need_explicit_conf(ha, cmd, 0)) { 2099 pkt->u.status0.flags |= 2100 __constant_cpu_to_le16( 2101 CTIO7_FLAGS_EXPLICIT_CONFORM | 2102 CTIO7_FLAGS_CONFORM_REQ); 2103 } 2104 } 2105 2106 } else { 2107 /* 2108 * We have already made sure that there is a sufficient 2109 * number of request entries, so req_pkt() will not 2110 * drop the HW lock. 
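* The data CTIO built above is only an intermediate completion: its handle is marked CTIO_INTERMEDIATE_HANDLE_MARK and it carries CTIO7_FLAGS_DONT_RET_CTIO, so the command is finished by the trailing status-only CTIO built below.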
2111 */ 2112 struct ctio7_to_24xx *ctio = 2113 (struct ctio7_to_24xx *)qlt_get_req_pkt(vha); 2114 2115 ql_dbg(ql_dbg_tgt, vha, 0xe019, 2116 "Building additional status packet\n"); 2117 2118 memcpy(ctio, pkt, sizeof(*ctio)); 2119 ctio->entry_count = 1; 2120 ctio->dseg_count = 0; 2121 ctio->u.status1.flags &= ~__constant_cpu_to_le16( 2122 CTIO7_FLAGS_DATA_IN); 2123 2124 /* Real finish is ctio_m1's finish */ 2125 pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK; 2126 pkt->u.status0.flags |= __constant_cpu_to_le16( 2127 CTIO7_FLAGS_DONT_RET_CTIO); 2128 qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio, 2129 &prm); 2130 pr_debug("Status CTIO7: %p\n", ctio); 2131 } 2132 } else 2133 qlt_24xx_init_ctio_to_isp(pkt, &prm); 2134 2135 2136 cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */ 2137 2138 ql_dbg(ql_dbg_tgt, vha, 0xe01a, 2139 "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n", 2140 pkt, scsi_status); 2141 2142 qla2x00_start_iocbs(vha, vha->req); 2143 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2144 2145 return 0; 2146 2147 out_unmap_unlock: 2148 if (cmd->sg_mapped) 2149 qlt_unmap_sg(vha, cmd); 2150 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2151 2152 return res; 2153 } 2154 EXPORT_SYMBOL(qlt_xmit_response); 2155 2156 int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) 2157 { 2158 struct ctio7_to_24xx *pkt; 2159 struct scsi_qla_host *vha = cmd->vha; 2160 struct qla_hw_data *ha = vha->hw; 2161 struct qla_tgt *tgt = cmd->tgt; 2162 struct qla_tgt_prm prm; 2163 unsigned long flags; 2164 int res = 0; 2165 2166 memset(&prm, 0, sizeof(prm)); 2167 prm.cmd = cmd; 2168 prm.tgt = tgt; 2169 prm.sg = NULL; 2170 prm.req_cnt = 1; 2171 2172 /* Send marker if required */ 2173 if (qlt_issue_marker(vha, 0) != QLA_SUCCESS) 2174 return -EIO; 2175 2176 ql_dbg(ql_dbg_tgt, vha, 0xe01b, "CTIO_start: vha(%d)", 2177 (int)vha->vp_idx); 2178 2179 /* Calculate number of entries and segments required */ 2180 if (qlt_pci_map_calc_cnt(&prm) != 0) 2181 return -EAGAIN; 2182 2183 spin_lock_irqsave(&ha->hardware_lock, flags); 2184 2185 /* Does F/W have an IOCBs for this request */ 2186 res = qlt_check_reserve_free_req(vha, prm.req_cnt); 2187 if (res != 0) 2188 goto out_unlock_free_unmap; 2189 2190 res = qlt_24xx_build_ctio_pkt(&prm, vha); 2191 if (unlikely(res != 0)) 2192 goto out_unlock_free_unmap; 2193 pkt = (struct ctio7_to_24xx *)prm.pkt; 2194 pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT | 2195 CTIO7_FLAGS_STATUS_MODE_0); 2196 qlt_load_data_segments(&prm, vha); 2197 2198 cmd->state = QLA_TGT_STATE_NEED_DATA; 2199 2200 qla2x00_start_iocbs(vha, vha->req); 2201 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2202 2203 return res; 2204 2205 out_unlock_free_unmap: 2206 if (cmd->sg_mapped) 2207 qlt_unmap_sg(vha, cmd); 2208 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2209 2210 return res; 2211 } 2212 EXPORT_SYMBOL(qlt_rdy_to_xfer); 2213 2214 /* If hardware_lock held on entry, might drop it, then reaquire */ 2215 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ 2216 static int __qlt_send_term_exchange(struct scsi_qla_host *vha, 2217 struct qla_tgt_cmd *cmd, 2218 struct atio_from_isp *atio) 2219 { 2220 struct ctio7_to_24xx *ctio24; 2221 struct qla_hw_data *ha = vha->hw; 2222 request_t *pkt; 2223 int ret = 0; 2224 2225 ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha); 2226 2227 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); 2228 if (pkt == NULL) { 2229 ql_dbg(ql_dbg_tgt, vha, 0xe050, 2230 "qla_target(%d): %s 
failed: unable to allocate " 2231 "request packet\n", vha->vp_idx, __func__); 2232 return -ENOMEM; 2233 } 2234 2235 if (cmd != NULL) { 2236 if (cmd->state < QLA_TGT_STATE_PROCESSED) { 2237 ql_dbg(ql_dbg_tgt, vha, 0xe051, 2238 "qla_target(%d): Terminating cmd %p with " 2239 "incorrect state %d\n", vha->vp_idx, cmd, 2240 cmd->state); 2241 } else 2242 ret = 1; 2243 } 2244 2245 pkt->entry_count = 1; 2246 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 2247 2248 ctio24 = (struct ctio7_to_24xx *)pkt; 2249 ctio24->entry_type = CTIO_TYPE7; 2250 ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED; 2251 ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT); 2252 ctio24->vp_index = vha->vp_idx; 2253 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 2254 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 2255 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 2256 ctio24->exchange_addr = atio->u.isp24.exchange_addr; 2257 ctio24->u.status1.flags = (atio->u.isp24.attr << 9) | 2258 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | 2259 CTIO7_FLAGS_TERMINATE); 2260 ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); 2261 2262 /* Most likely, it isn't needed */ 2263 ctio24->u.status1.residual = get_unaligned((uint32_t *) 2264 &atio->u.isp24.fcp_cmnd.add_cdb[ 2265 atio->u.isp24.fcp_cmnd.add_cdb_len]); 2266 if (ctio24->u.status1.residual != 0) 2267 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER; 2268 2269 qla2x00_start_iocbs(vha, vha->req); 2270 return ret; 2271 } 2272 2273 static void qlt_send_term_exchange(struct scsi_qla_host *vha, 2274 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked) 2275 { 2276 unsigned long flags; 2277 int rc; 2278 2279 if (qlt_issue_marker(vha, ha_locked) < 0) 2280 return; 2281 2282 if (ha_locked) { 2283 rc = __qlt_send_term_exchange(vha, cmd, atio); 2284 goto done; 2285 } 2286 spin_lock_irqsave(&vha->hw->hardware_lock, flags); 2287 rc = __qlt_send_term_exchange(vha, cmd, atio); 2288 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); 2289 done: 2290 if (rc == 1) { 2291 if (!ha_locked && !in_interrupt()) 2292 msleep(250); /* just in case */ 2293 2294 vha->hw->tgt.tgt_ops->free_cmd(cmd); 2295 } 2296 } 2297 2298 void qlt_free_cmd(struct qla_tgt_cmd *cmd) 2299 { 2300 BUG_ON(cmd->sg_mapped); 2301 2302 if (unlikely(cmd->free_sg)) 2303 kfree(cmd->sg); 2304 kmem_cache_free(qla_tgt_cmd_cachep, cmd); 2305 } 2306 EXPORT_SYMBOL(qlt_free_cmd); 2307 2308 /* ha->hardware_lock supposed to be held on entry */ 2309 static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha, 2310 struct qla_tgt_cmd *cmd, void *ctio) 2311 { 2312 struct qla_tgt_srr_ctio *sc; 2313 struct qla_hw_data *ha = vha->hw; 2314 struct qla_tgt *tgt = ha->tgt.qla_tgt; 2315 struct qla_tgt_srr_imm *imm; 2316 2317 tgt->ctio_srr_id++; 2318 2319 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019, 2320 "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx); 2321 2322 if (!ctio) { 2323 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055, 2324 "qla_target(%d): SRR CTIO, but ctio is NULL\n", 2325 vha->vp_idx); 2326 return -EINVAL; 2327 } 2328 2329 sc = kzalloc(sizeof(*sc), GFP_ATOMIC); 2330 if (sc != NULL) { 2331 sc->cmd = cmd; 2332 /* IRQ is already OFF */ 2333 spin_lock(&tgt->srr_lock); 2334 sc->srr_id = tgt->ctio_srr_id; 2335 list_add_tail(&sc->srr_list_entry, 2336 &tgt->srr_ctio_list); 2337 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a, 2338 "CTIO SRR %p added (id %d)\n", sc, sc->srr_id); 2339 if (tgt->imm_srr_id == tgt->ctio_srr_id) { 2340 int found = 0; 2341 
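/* Pair this CTIO SRR with the IMM SRR that carries the same srr_id. */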
list_for_each_entry(imm, &tgt->srr_imm_list, 2342 srr_list_entry) { 2343 if (imm->srr_id == sc->srr_id) { 2344 found = 1; 2345 break; 2346 } 2347 } 2348 if (found) { 2349 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b, 2350 "Scheduling srr work\n"); 2351 schedule_work(&tgt->srr_work); 2352 } else { 2353 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056, 2354 "qla_target(%d): imm_srr_id " 2355 "== ctio_srr_id (%d), but there is no " 2356 "corresponding SRR IMM, deleting CTIO " 2357 "SRR %p\n", vha->vp_idx, 2358 tgt->ctio_srr_id, sc); 2359 list_del(&sc->srr_list_entry); 2360 spin_unlock(&tgt->srr_lock); 2361 2362 kfree(sc); 2363 return -EINVAL; 2364 } 2365 } 2366 spin_unlock(&tgt->srr_lock); 2367 } else { 2368 struct qla_tgt_srr_imm *ti; 2369 2370 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057, 2371 "qla_target(%d): Unable to allocate SRR CTIO entry\n", 2372 vha->vp_idx); 2373 spin_lock(&tgt->srr_lock); 2374 list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list, 2375 srr_list_entry) { 2376 if (imm->srr_id == tgt->ctio_srr_id) { 2377 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c, 2378 "IMM SRR %p deleted (id %d)\n", 2379 imm, imm->srr_id); 2380 list_del(&imm->srr_list_entry); 2381 qlt_reject_free_srr_imm(vha, imm, 1); 2382 } 2383 } 2384 spin_unlock(&tgt->srr_lock); 2385 2386 return -ENOMEM; 2387 } 2388 2389 return 0; 2390 } 2391 2392 /* 2393 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 2394 */ 2395 static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio, 2396 struct qla_tgt_cmd *cmd, uint32_t status) 2397 { 2398 int term = 0; 2399 2400 if (ctio != NULL) { 2401 struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio; 2402 term = !(c->flags & 2403 __constant_cpu_to_le16(OF_TERM_EXCH)); 2404 } else 2405 term = 1; 2406 2407 if (term) 2408 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); 2409 2410 return term; 2411 } 2412 2413 /* ha->hardware_lock supposed to be held on entry */ 2414 static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha, 2415 uint32_t handle) 2416 { 2417 struct qla_hw_data *ha = vha->hw; 2418 2419 handle--; 2420 if (ha->tgt.cmds[handle] != NULL) { 2421 struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle]; 2422 ha->tgt.cmds[handle] = NULL; 2423 return cmd; 2424 } else 2425 return NULL; 2426 } 2427 2428 /* ha->hardware_lock supposed to be held on entry */ 2429 static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha, 2430 uint32_t handle, void *ctio) 2431 { 2432 struct qla_tgt_cmd *cmd = NULL; 2433 2434 /* Clear out internal marks */ 2435 handle &= ~(CTIO_COMPLETION_HANDLE_MARK | 2436 CTIO_INTERMEDIATE_HANDLE_MARK); 2437 2438 if (handle != QLA_TGT_NULL_HANDLE) { 2439 if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) { 2440 ql_dbg(ql_dbg_tgt, vha, 0xe01d, "%s", 2441 "SKIP_HANDLE CTIO\n"); 2442 return NULL; 2443 } 2444 /* handle-1 is actually used */ 2445 if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) { 2446 ql_dbg(ql_dbg_tgt, vha, 0xe052, 2447 "qla_target(%d): Wrong handle %x received\n", 2448 vha->vp_idx, handle); 2449 return NULL; 2450 } 2451 cmd = qlt_get_cmd(vha, handle); 2452 if (unlikely(cmd == NULL)) { 2453 ql_dbg(ql_dbg_tgt, vha, 0xe053, 2454 "qla_target(%d): Suspicious: unable to " 2455 "find the command with handle %x\n", vha->vp_idx, 2456 handle); 2457 return NULL; 2458 } 2459 } else if (ctio != NULL) { 2460 /* We can't get loop ID from CTIO7 */ 2461 ql_dbg(ql_dbg_tgt, vha, 0xe054, 2462 "qla_target(%d): Wrong CTIO received: QLA24xx doesn't " 2463 "support NULL handles\n", vha->vp_idx); 2464 return NULL; 2465 } 2466 2467 return cmd; 2468 } 2469 
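/*
 * Editorial sketch (not driver code): how a CTIO completion handle
 * round-trips through the two helpers above, assuming only the two
 * driver-private mark bits sit on top of the index:
 *
 *	uint32_t h = ctio_handle;
 *	h &= ~(CTIO_COMPLETION_HANDLE_MARK | CTIO_INTERMEDIATE_HANDLE_MARK);
 *	if (h != QLA_TGT_NULL_HANDLE && h != QLA_TGT_SKIP_HANDLE &&
 *	    h <= DEFAULT_OUTSTANDING_COMMANDS)
 *		cmd = ha->tgt.cmds[h - 1];	(handle-1 is the index)
 */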
2470 /* 2471 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 2472 */ 2473 static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, 2474 uint32_t status, void *ctio) 2475 { 2476 struct qla_hw_data *ha = vha->hw; 2477 struct se_cmd *se_cmd; 2478 struct target_core_fabric_ops *tfo; 2479 struct qla_tgt_cmd *cmd; 2480 2481 ql_dbg(ql_dbg_tgt, vha, 0xe01e, 2482 "qla_target(%d): handle(ctio %p status %#x) <- %08x\n", 2483 vha->vp_idx, ctio, status, handle); 2484 2485 if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) { 2486 /* That could happen only in case of an error/reset/abort */ 2487 if (status != CTIO_SUCCESS) { 2488 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d, 2489 "Intermediate CTIO received" 2490 " (status %x)\n", status); 2491 } 2492 return; 2493 } 2494 2495 cmd = qlt_ctio_to_cmd(vha, handle, ctio); 2496 if (cmd == NULL) 2497 return; 2498 2499 se_cmd = &cmd->se_cmd; 2500 tfo = se_cmd->se_tfo; 2501 2502 if (cmd->sg_mapped) 2503 qlt_unmap_sg(vha, cmd); 2504 2505 if (unlikely(status != CTIO_SUCCESS)) { 2506 switch (status & 0xFFFF) { 2507 case CTIO_LIP_RESET: 2508 case CTIO_TARGET_RESET: 2509 case CTIO_ABORTED: 2510 case CTIO_TIMEOUT: 2511 case CTIO_INVALID_RX_ID: 2512 /* They are OK */ 2513 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058, 2514 "qla_target(%d): CTIO with " 2515 "status %#x received, state %x, se_cmd %p, " 2516 "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, " 2517 "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx, 2518 status, cmd->state, se_cmd); 2519 break; 2520 2521 case CTIO_PORT_LOGGED_OUT: 2522 case CTIO_PORT_UNAVAILABLE: 2523 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059, 2524 "qla_target(%d): CTIO with PORT LOGGED " 2525 "OUT (29) or PORT UNAVAILABLE (28) status %x " 2526 "received (state %x, se_cmd %p)\n", vha->vp_idx, 2527 status, cmd->state, se_cmd); 2528 break; 2529 2530 case CTIO_SRR_RECEIVED: 2531 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a, 2532 "qla_target(%d): CTIO with SRR_RECEIVED" 2533 " status %x received (state %x, se_cmd %p)\n", 2534 vha->vp_idx, status, cmd->state, se_cmd); 2535 if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0) 2536 break; 2537 else 2538 return; 2539 2540 default: 2541 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, 2542 "qla_target(%d): CTIO with error status " 2543 "0x%x received (state %x, se_cmd %p\n", 2544 vha->vp_idx, status, cmd->state, se_cmd); 2545 break; 2546 } 2547 2548 if (cmd->state != QLA_TGT_STATE_NEED_DATA) 2549 if (qlt_term_ctio_exchange(vha, ctio, cmd, status)) 2550 return; 2551 } 2552 2553 if (cmd->state == QLA_TGT_STATE_PROCESSED) { 2554 ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd); 2555 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 2556 int rx_status = 0; 2557 2558 cmd->state = QLA_TGT_STATE_DATA_IN; 2559 2560 if (unlikely(status != CTIO_SUCCESS)) 2561 rx_status = -EIO; 2562 else 2563 cmd->write_data_transferred = 1; 2564 2565 ql_dbg(ql_dbg_tgt, vha, 0xe020, 2566 "Data received, context %x, rx_status %d\n", 2567 0x0, rx_status); 2568 2569 ha->tgt.tgt_ops->handle_data(cmd); 2570 return; 2571 } else if (cmd->state == QLA_TGT_STATE_ABORTED) { 2572 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, 2573 "Aborted command %p (tag %d) finished\n", cmd, cmd->tag); 2574 } else { 2575 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c, 2576 "qla_target(%d): A command in state (%d) should " 2577 "not return a CTIO complete\n", vha->vp_idx, cmd->state); 2578 } 2579 2580 if (unlikely(status != CTIO_SUCCESS)) { 2581 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n"); 2582 dump_stack(); 2583 } 2584 2585 ha->tgt.tgt_ops->free_cmd(cmd); 2586 } 
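/*
 * Command lifecycle as seen by the completion path above (summary
 * derived from this file):
 *
 *	QLA_TGT_STATE_PROCESSED	- final CTIO returned, command is freed
 *	QLA_TGT_STATE_NEED_DATA	- write data arrived; state moves to
 *				  QLA_TGT_STATE_DATA_IN and the command is
 *				  handed to tgt_ops->handle_data()
 *	QLA_TGT_STATE_ABORTED	- exchange already terminated, just free
 */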
2587 2588 static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha, 2589 uint8_t task_codes) 2590 { 2591 int fcp_task_attr; 2592 2593 switch (task_codes) { 2594 case ATIO_SIMPLE_QUEUE: 2595 fcp_task_attr = MSG_SIMPLE_TAG; 2596 break; 2597 case ATIO_HEAD_OF_QUEUE: 2598 fcp_task_attr = MSG_HEAD_TAG; 2599 break; 2600 case ATIO_ORDERED_QUEUE: 2601 fcp_task_attr = MSG_ORDERED_TAG; 2602 break; 2603 case ATIO_ACA_QUEUE: 2604 fcp_task_attr = MSG_ACA_TAG; 2605 break; 2606 case ATIO_UNTAGGED: 2607 fcp_task_attr = MSG_SIMPLE_TAG; 2608 break; 2609 default: 2610 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d, 2611 "qla_target: unknown task code %x, using ORDERED instead\n", 2612 task_codes); 2613 fcp_task_attr = MSG_ORDERED_TAG; 2614 break; 2615 } 2616 2617 return fcp_task_attr; 2618 } 2619 2620 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *, 2621 uint8_t *); 2622 /* 2623 * Process context for I/O path into tcm_qla2xxx code 2624 */ 2625 static void qlt_do_work(struct work_struct *work) 2626 { 2627 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); 2628 scsi_qla_host_t *vha = cmd->vha; 2629 struct qla_hw_data *ha = vha->hw; 2630 struct qla_tgt *tgt = ha->tgt.qla_tgt; 2631 struct qla_tgt_sess *sess = NULL; 2632 struct atio_from_isp *atio = &cmd->atio; 2633 unsigned char *cdb; 2634 unsigned long flags; 2635 uint32_t data_length; 2636 int ret, fcp_task_attr, data_dir, bidi = 0; 2637 2638 if (tgt->tgt_stop) 2639 goto out_term; 2640 2641 spin_lock_irqsave(&ha->hardware_lock, flags); 2642 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, 2643 atio->u.isp24.fcp_hdr.s_id); 2644 /* Do kref_get() before dropping qla_hw_data->hardware_lock. */ 2645 if (sess) 2646 kref_get(&sess->se_sess->sess_kref); 2647 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2648 2649 if (unlikely(!sess)) { 2650 uint8_t *s_id = atio->u.isp24.fcp_hdr.s_id; 2651 2652 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022, 2653 "qla_target(%d): Unable to find wwn login" 2654 " (s_id %x:%x:%x), trying to create it manually\n", 2655 vha->vp_idx, s_id[0], s_id[1], s_id[2]); 2656 2657 if (atio->u.raw.entry_count > 1) { 2658 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023, 2659 "Dropping multi-entry cmd %p\n", cmd); 2660 goto out_term; 2661 } 2662 2663 mutex_lock(&ha->tgt.tgt_mutex); 2664 sess = qlt_make_local_sess(vha, s_id); 2665 /* sess has an extra creation ref. 
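That reference is dropped again via ha->tgt.tgt_ops->put_sess() once tgt_ops->handle_cmd() has been called (or on the out_term path).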
*/ 2666 mutex_unlock(&ha->tgt.tgt_mutex); 2667 2668 if (!sess) 2669 goto out_term; 2670 } 2671 2672 cmd->sess = sess; 2673 cmd->loop_id = sess->loop_id; 2674 cmd->conf_compl_supported = sess->conf_compl_supported; 2675 2676 cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; 2677 cmd->tag = atio->u.isp24.exchange_addr; 2678 cmd->unpacked_lun = scsilun_to_int( 2679 (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun); 2680 2681 if (atio->u.isp24.fcp_cmnd.rddata && 2682 atio->u.isp24.fcp_cmnd.wrdata) { 2683 bidi = 1; 2684 data_dir = DMA_TO_DEVICE; 2685 } else if (atio->u.isp24.fcp_cmnd.rddata) 2686 data_dir = DMA_FROM_DEVICE; 2687 else if (atio->u.isp24.fcp_cmnd.wrdata) 2688 data_dir = DMA_TO_DEVICE; 2689 else 2690 data_dir = DMA_NONE; 2691 2692 fcp_task_attr = qlt_get_fcp_task_attr(vha, 2693 atio->u.isp24.fcp_cmnd.task_attr); 2694 data_length = be32_to_cpu(get_unaligned((uint32_t *) 2695 &atio->u.isp24.fcp_cmnd.add_cdb[ 2696 atio->u.isp24.fcp_cmnd.add_cdb_len])); 2697 2698 ql_dbg(ql_dbg_tgt, vha, 0xe022, 2699 "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n", 2700 cmd, cmd->unpacked_lun, cmd->tag); 2701 2702 ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length, 2703 fcp_task_attr, data_dir, bidi); 2704 if (ret != 0) 2705 goto out_term; 2706 /* 2707 * Drop extra session reference from qla_tgt_handle_cmd_for_atio*( 2708 */ 2709 ha->tgt.tgt_ops->put_sess(sess); 2710 return; 2711 2712 out_term: 2713 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd); 2714 /* 2715 * cmd has not sent to target yet, so pass NULL as the second 2716 * argument to qlt_send_term_exchange() and free the memory here. 2717 */ 2718 spin_lock_irqsave(&ha->hardware_lock, flags); 2719 qlt_send_term_exchange(vha, NULL, &cmd->atio, 1); 2720 kmem_cache_free(qla_tgt_cmd_cachep, cmd); 2721 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2722 if (sess) 2723 ha->tgt.tgt_ops->put_sess(sess); 2724 } 2725 2726 /* ha->hardware_lock supposed to be held on entry */ 2727 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, 2728 struct atio_from_isp *atio) 2729 { 2730 struct qla_hw_data *ha = vha->hw; 2731 struct qla_tgt *tgt = ha->tgt.qla_tgt; 2732 struct qla_tgt_cmd *cmd; 2733 2734 if (unlikely(tgt->tgt_stop)) { 2735 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021, 2736 "New command while device %p is shutting down\n", tgt); 2737 return -EFAULT; 2738 } 2739 2740 cmd = kmem_cache_zalloc(qla_tgt_cmd_cachep, GFP_ATOMIC); 2741 if (!cmd) { 2742 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e, 2743 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx); 2744 return -ENOMEM; 2745 } 2746 2747 INIT_LIST_HEAD(&cmd->cmd_list); 2748 2749 memcpy(&cmd->atio, atio, sizeof(*atio)); 2750 cmd->state = QLA_TGT_STATE_NEW; 2751 cmd->tgt = ha->tgt.qla_tgt; 2752 cmd->vha = vha; 2753 2754 INIT_WORK(&cmd->work, qlt_do_work); 2755 queue_work(qla_tgt_wq, &cmd->work); 2756 return 0; 2757 2758 } 2759 2760 /* ha->hardware_lock supposed to be held on entry */ 2761 static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, 2762 int fn, void *iocb, int flags) 2763 { 2764 struct scsi_qla_host *vha = sess->vha; 2765 struct qla_hw_data *ha = vha->hw; 2766 struct qla_tgt_mgmt_cmd *mcmd; 2767 int res; 2768 uint8_t tmr_func; 2769 2770 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); 2771 if (!mcmd) { 2772 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009, 2773 "qla_target(%d): Allocation of management " 2774 "command failed, some commands and their data could " 2775 "leak\n", vha->vp_idx); 2776 return -ENOMEM; 2777 } 2778 memset(mcmd, 0, sizeof(*mcmd)); 
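/*
 * The switch below translates qla2xxx task management codes into
 * generic TCM TMR_* functions, e.g. QLA_TGT_CLEAR_ACA -> TMR_CLEAR_ACA,
 * QLA_TGT_TARGET_RESET -> TMR_TARGET_WARM_RESET, QLA_TGT_LUN_RESET ->
 * TMR_LUN_RESET, QLA_TGT_CLEAR_TS -> TMR_CLEAR_TASK_SET and
 * QLA_TGT_ABORT_TS -> TMR_ABORT_TASK_SET; unknown codes are rejected
 * with -ENOSYS.
 */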
2779 mcmd->sess = sess; 2780 2781 if (iocb) { 2782 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb, 2783 sizeof(mcmd->orig_iocb.imm_ntfy)); 2784 } 2785 mcmd->tmr_func = fn; 2786 mcmd->flags = flags; 2787 2788 switch (fn) { 2789 case QLA_TGT_CLEAR_ACA: 2790 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000, 2791 "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx); 2792 tmr_func = TMR_CLEAR_ACA; 2793 break; 2794 2795 case QLA_TGT_TARGET_RESET: 2796 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001, 2797 "qla_target(%d): TARGET_RESET received\n", 2798 sess->vha->vp_idx); 2799 tmr_func = TMR_TARGET_WARM_RESET; 2800 break; 2801 2802 case QLA_TGT_LUN_RESET: 2803 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002, 2804 "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx); 2805 tmr_func = TMR_LUN_RESET; 2806 break; 2807 2808 case QLA_TGT_CLEAR_TS: 2809 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003, 2810 "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx); 2811 tmr_func = TMR_CLEAR_TASK_SET; 2812 break; 2813 2814 case QLA_TGT_ABORT_TS: 2815 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004, 2816 "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx); 2817 tmr_func = TMR_ABORT_TASK_SET; 2818 break; 2819 #if 0 2820 case QLA_TGT_ABORT_ALL: 2821 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005, 2822 "qla_target(%d): Doing ABORT_ALL_TASKS\n", 2823 sess->vha->vp_idx); 2824 tmr_func = 0; 2825 break; 2826 2827 case QLA_TGT_ABORT_ALL_SESS: 2828 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006, 2829 "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n", 2830 sess->vha->vp_idx); 2831 tmr_func = 0; 2832 break; 2833 2834 case QLA_TGT_NEXUS_LOSS_SESS: 2835 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007, 2836 "qla_target(%d): Doing NEXUS_LOSS_SESS\n", 2837 sess->vha->vp_idx); 2838 tmr_func = 0; 2839 break; 2840 2841 case QLA_TGT_NEXUS_LOSS: 2842 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008, 2843 "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx); 2844 tmr_func = 0; 2845 break; 2846 #endif 2847 default: 2848 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a, 2849 "qla_target(%d): Unknown task mgmt fn 0x%x\n", 2850 sess->vha->vp_idx, fn); 2851 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); 2852 return -ENOSYS; 2853 } 2854 2855 res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0); 2856 if (res != 0) { 2857 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b, 2858 "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n", 2859 sess->vha->vp_idx, res); 2860 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); 2861 return -EFAULT; 2862 } 2863 2864 return 0; 2865 } 2866 2867 /* ha->hardware_lock supposed to be held on entry */ 2868 static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) 2869 { 2870 struct atio_from_isp *a = (struct atio_from_isp *)iocb; 2871 struct qla_hw_data *ha = vha->hw; 2872 struct qla_tgt *tgt; 2873 struct qla_tgt_sess *sess; 2874 uint32_t lun, unpacked_lun; 2875 int lun_size, fn; 2876 2877 tgt = ha->tgt.qla_tgt; 2878 2879 lun = a->u.isp24.fcp_cmnd.lun; 2880 lun_size = sizeof(a->u.isp24.fcp_cmnd.lun); 2881 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags; 2882 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, 2883 a->u.isp24.fcp_hdr.s_id); 2884 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); 2885 2886 if (!sess) { 2887 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024, 2888 "qla_target(%d): task mgmt fn 0x%x for " 2889 "non-existant session\n", vha->vp_idx, fn); 2890 return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb, 2891 sizeof(struct atio_from_isp)); 2892 } 2893 2894 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); 2895 } 2896 2897 /* ha->hardware_lock supposed to be held on entry */ 2898 static int 
__qlt_abort_task(struct scsi_qla_host *vha, 2899 struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess) 2900 { 2901 struct atio_from_isp *a = (struct atio_from_isp *)iocb; 2902 struct qla_hw_data *ha = vha->hw; 2903 struct qla_tgt_mgmt_cmd *mcmd; 2904 uint32_t lun, unpacked_lun; 2905 int rc; 2906 2907 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); 2908 if (mcmd == NULL) { 2909 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f, 2910 "qla_target(%d): %s: Allocation of ABORT cmd failed\n", 2911 vha->vp_idx, __func__); 2912 return -ENOMEM; 2913 } 2914 memset(mcmd, 0, sizeof(*mcmd)); 2915 2916 mcmd->sess = sess; 2917 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb, 2918 sizeof(mcmd->orig_iocb.imm_ntfy)); 2919 2920 lun = a->u.isp24.fcp_cmnd.lun; 2921 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); 2922 2923 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK, 2924 le16_to_cpu(iocb->u.isp2x.seq_id)); 2925 if (rc != 0) { 2926 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060, 2927 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n", 2928 vha->vp_idx, rc); 2929 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); 2930 return -EFAULT; 2931 } 2932 2933 return 0; 2934 } 2935 2936 /* ha->hardware_lock supposed to be held on entry */ 2937 static int qlt_abort_task(struct scsi_qla_host *vha, 2938 struct imm_ntfy_from_isp *iocb) 2939 { 2940 struct qla_hw_data *ha = vha->hw; 2941 struct qla_tgt_sess *sess; 2942 int loop_id; 2943 2944 loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb); 2945 2946 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); 2947 if (sess == NULL) { 2948 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025, 2949 "qla_target(%d): task abort for non-existent " 2950 "session\n", vha->vp_idx); 2951 return qlt_sched_sess_work(ha->tgt.qla_tgt, 2952 QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb)); 2953 } 2954 2955 return __qlt_abort_task(vha, iocb, sess); 2956 } 2957 2958 /* 2959 * ha->hardware_lock supposed to be held on entry. 
Might drop it, then reaquire 2960 */ 2961 static int qlt_24xx_handle_els(struct scsi_qla_host *vha, 2962 struct imm_ntfy_from_isp *iocb) 2963 { 2964 struct qla_hw_data *ha = vha->hw; 2965 int res = 0; 2966 2967 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026, 2968 "qla_target(%d): Port ID: 0x%02x:%02x:%02x" 2969 " ELS opcode: 0x%02x\n", vha->vp_idx, iocb->u.isp24.port_id[0], 2970 iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[2], 2971 iocb->u.isp24.status_subcode); 2972 2973 switch (iocb->u.isp24.status_subcode) { 2974 case ELS_PLOGI: 2975 case ELS_FLOGI: 2976 case ELS_PRLI: 2977 case ELS_LOGO: 2978 case ELS_PRLO: 2979 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); 2980 break; 2981 case ELS_PDISC: 2982 case ELS_ADISC: 2983 { 2984 struct qla_tgt *tgt = ha->tgt.qla_tgt; 2985 if (tgt->link_reinit_iocb_pending) { 2986 qlt_send_notify_ack(vha, &tgt->link_reinit_iocb, 2987 0, 0, 0, 0, 0, 0); 2988 tgt->link_reinit_iocb_pending = 0; 2989 } 2990 res = 1; /* send notify ack */ 2991 break; 2992 } 2993 2994 default: 2995 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061, 2996 "qla_target(%d): Unsupported ELS command %x " 2997 "received\n", vha->vp_idx, iocb->u.isp24.status_subcode); 2998 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); 2999 break; 3000 } 3001 3002 return res; 3003 } 3004 3005 static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset) 3006 { 3007 struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL; 3008 size_t first_offset = 0, rem_offset = offset, tmp = 0; 3009 int i, sg_srr_cnt, bufflen = 0; 3010 3011 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023, 3012 "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, " 3013 "cmd->sg_cnt: %u, direction: %d\n", 3014 cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); 3015 3016 /* 3017 * FIXME: Reject non zero SRR relative offset until we can test 3018 * this code properly. 3019 */ 3020 pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset); 3021 return -1; 3022 3023 if (!cmd->sg || !cmd->sg_cnt) { 3024 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055, 3025 "Missing cmd->sg or zero cmd->sg_cnt in" 3026 " qla_tgt_set_data_offset\n"); 3027 return -EINVAL; 3028 } 3029 /* 3030 * Walk the current cmd->sg list until we locate the new sg_srr_start 3031 */ 3032 for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) { 3033 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024, 3034 "sg[%d]: %p page: %p, length: %d, offset: %d\n", 3035 i, sg, sg_page(sg), sg->length, sg->offset); 3036 3037 if ((sg->length + tmp) > offset) { 3038 first_offset = rem_offset; 3039 sg_srr_start = sg; 3040 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025, 3041 "Found matching sg[%d], using %p as sg_srr_start, " 3042 "and using first_offset: %zu\n", i, sg, 3043 first_offset); 3044 break; 3045 } 3046 tmp += sg->length; 3047 rem_offset -= sg->length; 3048 } 3049 3050 if (!sg_srr_start) { 3051 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056, 3052 "Unable to locate sg_srr_start for offset: %u\n", offset); 3053 return -EINVAL; 3054 } 3055 sg_srr_cnt = (cmd->sg_cnt - i); 3056 3057 sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL); 3058 if (!sg_srr) { 3059 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057, 3060 "Unable to allocate sgp\n"); 3061 return -ENOMEM; 3062 } 3063 sg_init_table(sg_srr, sg_srr_cnt); 3064 sgp = &sg_srr[0]; 3065 /* 3066 * Walk the remaining list for sg_srr_start, mapping to the newly 3067 * allocated sg_srr taking first_offset into account. 
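* E.g., with two 4096-byte segments and offset == 5000, the walk above stops at the second segment with first_offset == 904, so the new list's first segment starts at page offset 904 with length 4096 - 904 == 3192.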
3068 */ 3069 for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) { 3070 if (first_offset) { 3071 sg_set_page(sgp, sg_page(sg), 3072 (sg->length - first_offset), first_offset); 3073 first_offset = 0; 3074 } else { 3075 sg_set_page(sgp, sg_page(sg), sg->length, 0); 3076 } 3077 bufflen += sgp->length; 3078 3079 sgp = sg_next(sgp); 3080 if (!sgp) 3081 break; 3082 } 3083 3084 cmd->sg = sg_srr; 3085 cmd->sg_cnt = sg_srr_cnt; 3086 cmd->bufflen = bufflen; 3087 cmd->offset += offset; 3088 cmd->free_sg = 1; 3089 3090 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg); 3091 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n", 3092 cmd->sg_cnt); 3093 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n", 3094 cmd->bufflen); 3095 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n", 3096 cmd->offset); 3097 3098 if (cmd->sg_cnt < 0) 3099 BUG(); 3100 3101 if (cmd->bufflen < 0) 3102 BUG(); 3103 3104 return 0; 3105 } 3106 3107 static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd, 3108 uint32_t srr_rel_offs, int *xmit_type) 3109 { 3110 int res = 0, rel_offs; 3111 3112 rel_offs = srr_rel_offs - cmd->offset; 3113 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d", 3114 srr_rel_offs, rel_offs); 3115 3116 *xmit_type = QLA_TGT_XMIT_ALL; 3117 3118 if (rel_offs < 0) { 3119 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062, 3120 "qla_target(%d): SRR rel_offs (%d) < 0", 3121 cmd->vha->vp_idx, rel_offs); 3122 res = -1; 3123 } else if (rel_offs == cmd->bufflen) 3124 *xmit_type = QLA_TGT_XMIT_STATUS; 3125 else if (rel_offs > 0) 3126 res = qlt_set_data_offset(cmd, rel_offs); 3127 3128 return res; 3129 } 3130 3131 /* No locks, thread context */ 3132 static void qlt_handle_srr(struct scsi_qla_host *vha, 3133 struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm) 3134 { 3135 struct imm_ntfy_from_isp *ntfy = 3136 (struct imm_ntfy_from_isp *)&imm->imm_ntfy; 3137 struct qla_hw_data *ha = vha->hw; 3138 struct qla_tgt_cmd *cmd = sctio->cmd; 3139 struct se_cmd *se_cmd = &cmd->se_cmd; 3140 unsigned long flags; 3141 int xmit_type = 0, resp = 0; 3142 uint32_t offset; 3143 uint16_t srr_ui; 3144 3145 offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs); 3146 srr_ui = ntfy->u.isp24.srr_ui; 3147 3148 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n", 3149 cmd, srr_ui); 3150 3151 switch (srr_ui) { 3152 case SRR_IU_STATUS: 3153 spin_lock_irqsave(&ha->hardware_lock, flags); 3154 qlt_send_notify_ack(vha, ntfy, 3155 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); 3156 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3157 xmit_type = QLA_TGT_XMIT_STATUS; 3158 resp = 1; 3159 break; 3160 case SRR_IU_DATA_IN: 3161 if (!cmd->sg || !cmd->sg_cnt) { 3162 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063, 3163 "Unable to process SRR_IU_DATA_IN due to" 3164 " missing cmd->sg, state: %d\n", cmd->state); 3165 dump_stack(); 3166 goto out_reject; 3167 } 3168 if (se_cmd->scsi_status != 0) { 3169 ql_dbg(ql_dbg_tgt, vha, 0xe02a, 3170 "Rejecting SRR_IU_DATA_IN with non GOOD " 3171 "scsi_status\n"); 3172 goto out_reject; 3173 } 3174 cmd->bufflen = se_cmd->data_length; 3175 3176 if (qlt_has_data(cmd)) { 3177 if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0) 3178 goto out_reject; 3179 spin_lock_irqsave(&ha->hardware_lock, flags); 3180 qlt_send_notify_ack(vha, ntfy, 3181 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); 3182 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3183 resp = 1; 3184 } else { 3185 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064, 3186 "qla_target(%d): SRR for in data for cmd " 3187 "without them 
(tag %d, SCSI status %d), " 3188 "reject", vha->vp_idx, cmd->tag, 3189 cmd->se_cmd.scsi_status); 3190 goto out_reject; 3191 } 3192 break; 3193 case SRR_IU_DATA_OUT: 3194 if (!cmd->sg || !cmd->sg_cnt) { 3195 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065, 3196 "Unable to process SRR_IU_DATA_OUT due to" 3197 " missing cmd->sg\n"); 3198 dump_stack(); 3199 goto out_reject; 3200 } 3201 if (se_cmd->scsi_status != 0) { 3202 ql_dbg(ql_dbg_tgt, vha, 0xe02b, 3203 "Rejecting SRR_IU_DATA_OUT" 3204 " with non GOOD scsi_status\n"); 3205 goto out_reject; 3206 } 3207 cmd->bufflen = se_cmd->data_length; 3208 3209 if (qlt_has_data(cmd)) { 3210 if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0) 3211 goto out_reject; 3212 spin_lock_irqsave(&ha->hardware_lock, flags); 3213 qlt_send_notify_ack(vha, ntfy, 3214 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); 3215 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3216 if (xmit_type & QLA_TGT_XMIT_DATA) 3217 qlt_rdy_to_xfer(cmd); 3218 } else { 3219 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066, 3220 "qla_target(%d): SRR for out data for cmd " 3221 "without them (tag %d, SCSI status %d), " 3222 "reject", vha->vp_idx, cmd->tag, 3223 cmd->se_cmd.scsi_status); 3224 goto out_reject; 3225 } 3226 break; 3227 default: 3228 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067, 3229 "qla_target(%d): Unknown srr_ui value %x", 3230 vha->vp_idx, srr_ui); 3231 goto out_reject; 3232 } 3233 3234 /* Transmit response in case of status and data-in cases */ 3235 if (resp) 3236 qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status); 3237 3238 return; 3239 3240 out_reject: 3241 spin_lock_irqsave(&ha->hardware_lock, flags); 3242 qlt_send_notify_ack(vha, ntfy, 0, 0, 0, 3243 NOTIFY_ACK_SRR_FLAGS_REJECT, 3244 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, 3245 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); 3246 if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 3247 cmd->state = QLA_TGT_STATE_DATA_IN; 3248 dump_stack(); 3249 } else 3250 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); 3251 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3252 } 3253 3254 static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha, 3255 struct qla_tgt_srr_imm *imm, int ha_locked) 3256 { 3257 struct qla_hw_data *ha = vha->hw; 3258 unsigned long flags = 0; 3259 3260 if (!ha_locked) 3261 spin_lock_irqsave(&ha->hardware_lock, flags); 3262 3263 qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0, 3264 NOTIFY_ACK_SRR_FLAGS_REJECT, 3265 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, 3266 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); 3267 3268 if (!ha_locked) 3269 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3270 3271 kfree(imm); 3272 } 3273 3274 static void qlt_handle_srr_work(struct work_struct *work) 3275 { 3276 struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work); 3277 struct scsi_qla_host *vha = tgt->vha; 3278 struct qla_tgt_srr_ctio *sctio; 3279 unsigned long flags; 3280 3281 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n", 3282 tgt); 3283 3284 restart: 3285 spin_lock_irqsave(&tgt->srr_lock, flags); 3286 list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) { 3287 struct qla_tgt_srr_imm *imm, *i, *ti; 3288 struct qla_tgt_cmd *cmd; 3289 struct se_cmd *se_cmd; 3290 3291 imm = NULL; 3292 list_for_each_entry_safe(i, ti, &tgt->srr_imm_list, 3293 srr_list_entry) { 3294 if (i->srr_id == sctio->srr_id) { 3295 list_del(&i->srr_list_entry); 3296 if (imm) { 3297 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068, 3298 "qla_target(%d): There must be " 3299 "only one IMM SRR per CTIO SRR " 3300 "(IMM SRR %p, id %d, CTIO %p\n", 
3301 vha->vp_idx, i, i->srr_id, sctio); 3302 qlt_reject_free_srr_imm(tgt->vha, i, 0); 3303 } else 3304 imm = i; 3305 } 3306 } 3307 3308 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a, 3309 "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio, 3310 sctio->srr_id); 3311 3312 if (imm == NULL) { 3313 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b, 3314 "Not found matching IMM for SRR CTIO (id %d)\n", 3315 sctio->srr_id); 3316 continue; 3317 } else 3318 list_del(&sctio->srr_list_entry); 3319 3320 spin_unlock_irqrestore(&tgt->srr_lock, flags); 3321 3322 cmd = sctio->cmd; 3323 /* 3324 * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow 3325 * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in() 3326 * logic.. 3327 */ 3328 cmd->offset = 0; 3329 if (cmd->free_sg) { 3330 kfree(cmd->sg); 3331 cmd->sg = NULL; 3332 cmd->free_sg = 0; 3333 } 3334 se_cmd = &cmd->se_cmd; 3335 3336 cmd->sg_cnt = se_cmd->t_data_nents; 3337 cmd->sg = se_cmd->t_data_sg; 3338 3339 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c, 3340 "SRR cmd %p (se_cmd %p, tag %d, op %x), " 3341 "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag, 3342 se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset); 3343 3344 qlt_handle_srr(vha, sctio, imm); 3345 3346 kfree(imm); 3347 kfree(sctio); 3348 goto restart; 3349 } 3350 spin_unlock_irqrestore(&tgt->srr_lock, flags); 3351 } 3352 3353 /* ha->hardware_lock supposed to be held on entry */ 3354 static void qlt_prepare_srr_imm(struct scsi_qla_host *vha, 3355 struct imm_ntfy_from_isp *iocb) 3356 { 3357 struct qla_tgt_srr_imm *imm; 3358 struct qla_hw_data *ha = vha->hw; 3359 struct qla_tgt *tgt = ha->tgt.qla_tgt; 3360 struct qla_tgt_srr_ctio *sctio; 3361 3362 tgt->imm_srr_id++; 3363 3364 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02d, "qla_target(%d): SRR received\n", 3365 vha->vp_idx); 3366 3367 imm = kzalloc(sizeof(*imm), GFP_ATOMIC); 3368 if (imm != NULL) { 3369 memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy)); 3370 3371 /* IRQ is already OFF */ 3372 spin_lock(&tgt->srr_lock); 3373 imm->srr_id = tgt->imm_srr_id; 3374 list_add_tail(&imm->srr_list_entry, 3375 &tgt->srr_imm_list); 3376 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e, 3377 "IMM NTFY SRR %p added (id %d, ui %x)\n", 3378 imm, imm->srr_id, iocb->u.isp24.srr_ui); 3379 if (tgt->imm_srr_id == tgt->ctio_srr_id) { 3380 int found = 0; 3381 list_for_each_entry(sctio, &tgt->srr_ctio_list, 3382 srr_list_entry) { 3383 if (sctio->srr_id == imm->srr_id) { 3384 found = 1; 3385 break; 3386 } 3387 } 3388 if (found) { 3389 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s", 3390 "Scheduling srr work\n"); 3391 schedule_work(&tgt->srr_work); 3392 } else { 3393 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030, 3394 "qla_target(%d): imm_srr_id " 3395 "== ctio_srr_id (%d), but there is no " 3396 "corresponding SRR CTIO, deleting IMM " 3397 "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id, 3398 imm); 3399 list_del(&imm->srr_list_entry); 3400 3401 kfree(imm); 3402 3403 spin_unlock(&tgt->srr_lock); 3404 goto out_reject; 3405 } 3406 } 3407 spin_unlock(&tgt->srr_lock); 3408 } else { 3409 struct qla_tgt_srr_ctio *ts; 3410 3411 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069, 3412 "qla_target(%d): Unable to allocate SRR IMM " 3413 "entry, SRR request will be rejected\n", vha->vp_idx); 3414 3415 /* IRQ is already OFF */ 3416 spin_lock(&tgt->srr_lock); 3417 list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list, 3418 srr_list_entry) { 3419 if (sctio->srr_id == tgt->imm_srr_id) { 3420 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031, 3421 "CTIO SRR %p deleted (id %d)\n", 3422 sctio, sctio->srr_id); 3423 list_del(&sctio->srr_list_entry); 3424 qlt_send_term_exchange(vha, 
sctio->cmd, 3425 &sctio->cmd->atio, 1); 3426 kfree(sctio); 3427 } 3428 } 3429 spin_unlock(&tgt->srr_lock); 3430 goto out_reject; 3431 } 3432 3433 return; 3434 3435 out_reject: 3436 qlt_send_notify_ack(vha, iocb, 0, 0, 0, 3437 NOTIFY_ACK_SRR_FLAGS_REJECT, 3438 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM, 3439 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL); 3440 } 3441 3442 /* 3443 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 3444 */ 3445 static void qlt_handle_imm_notify(struct scsi_qla_host *vha, 3446 struct imm_ntfy_from_isp *iocb) 3447 { 3448 struct qla_hw_data *ha = vha->hw; 3449 uint32_t add_flags = 0; 3450 int send_notify_ack = 1; 3451 uint16_t status; 3452 3453 status = le16_to_cpu(iocb->u.isp2x.status); 3454 switch (status) { 3455 case IMM_NTFY_LIP_RESET: 3456 { 3457 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032, 3458 "qla_target(%d): LIP reset (loop %#x), subcode %x\n", 3459 vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle), 3460 iocb->u.isp24.status_subcode); 3461 3462 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0) 3463 send_notify_ack = 0; 3464 break; 3465 } 3466 3467 case IMM_NTFY_LIP_LINK_REINIT: 3468 { 3469 struct qla_tgt *tgt = ha->tgt.qla_tgt; 3470 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033, 3471 "qla_target(%d): LINK REINIT (loop %#x, " 3472 "subcode %x)\n", vha->vp_idx, 3473 le16_to_cpu(iocb->u.isp24.nport_handle), 3474 iocb->u.isp24.status_subcode); 3475 if (tgt->link_reinit_iocb_pending) { 3476 qlt_send_notify_ack(vha, &tgt->link_reinit_iocb, 3477 0, 0, 0, 0, 0, 0); 3478 } 3479 memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb)); 3480 tgt->link_reinit_iocb_pending = 1; 3481 /* 3482 * QLogic requires to wait after LINK REINIT for possible 3483 * PDISC or ADISC ELS commands 3484 */ 3485 send_notify_ack = 0; 3486 break; 3487 } 3488 3489 case IMM_NTFY_PORT_LOGOUT: 3490 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034, 3491 "qla_target(%d): Port logout (loop " 3492 "%#x, subcode %x)\n", vha->vp_idx, 3493 le16_to_cpu(iocb->u.isp24.nport_handle), 3494 iocb->u.isp24.status_subcode); 3495 3496 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0) 3497 send_notify_ack = 0; 3498 /* The sessions will be cleared in the callback, if needed */ 3499 break; 3500 3501 case IMM_NTFY_GLBL_TPRLO: 3502 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035, 3503 "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status); 3504 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0) 3505 send_notify_ack = 0; 3506 /* The sessions will be cleared in the callback, if needed */ 3507 break; 3508 3509 case IMM_NTFY_PORT_CONFIG: 3510 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036, 3511 "qla_target(%d): Port config changed (%x)\n", vha->vp_idx, 3512 status); 3513 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0) 3514 send_notify_ack = 0; 3515 /* The sessions will be cleared in the callback, if needed */ 3516 break; 3517 3518 case IMM_NTFY_GLBL_LOGO: 3519 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a, 3520 "qla_target(%d): Link failure detected\n", 3521 vha->vp_idx); 3522 /* I_T nexus loss */ 3523 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0) 3524 send_notify_ack = 0; 3525 break; 3526 3527 case IMM_NTFY_IOCB_OVERFLOW: 3528 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b, 3529 "qla_target(%d): Cannot provide requested " 3530 "capability (IOCB overflowed the immediate notify " 3531 "resource count)\n", vha->vp_idx); 3532 break; 3533 3534 case IMM_NTFY_ABORT_TASK: 3535 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037, 3536 "qla_target(%d): Abort Task (S %08x I %#x -> " 3537 "L %#x)\n", vha->vp_idx, 3538 le16_to_cpu(iocb->u.isp2x.seq_id), 3539 GET_TARGET_ID(ha, 
(struct atio_from_isp *)iocb), 3540 le16_to_cpu(iocb->u.isp2x.lun)); 3541 if (qlt_abort_task(vha, iocb) == 0) 3542 send_notify_ack = 0; 3543 break; 3544 3545 case IMM_NTFY_RESOURCE: 3546 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c, 3547 "qla_target(%d): Out of resources, host %ld\n", 3548 vha->vp_idx, vha->host_no); 3549 break; 3550 3551 case IMM_NTFY_MSG_RX: 3552 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038, 3553 "qla_target(%d): Immediate notify task %x\n", 3554 vha->vp_idx, iocb->u.isp2x.task_flags); 3555 if (qlt_handle_task_mgmt(vha, iocb) == 0) 3556 send_notify_ack = 0; 3557 break; 3558 3559 case IMM_NTFY_ELS: 3560 if (qlt_24xx_handle_els(vha, iocb) == 0) 3561 send_notify_ack = 0; 3562 break; 3563 3564 case IMM_NTFY_SRR: 3565 qlt_prepare_srr_imm(vha, iocb); 3566 send_notify_ack = 0; 3567 break; 3568 3569 default: 3570 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d, 3571 "qla_target(%d): Received unknown immediate " 3572 "notify status %x\n", vha->vp_idx, status); 3573 break; 3574 } 3575 3576 if (send_notify_ack) 3577 qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0); 3578 } 3579 3580 /* 3581 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 3582 * This function sends busy to ISP 2xxx or 24xx. 3583 */ 3584 static void qlt_send_busy(struct scsi_qla_host *vha, 3585 struct atio_from_isp *atio, uint16_t status) 3586 { 3587 struct ctio7_to_24xx *ctio24; 3588 struct qla_hw_data *ha = vha->hw; 3589 request_t *pkt; 3590 struct qla_tgt_sess *sess = NULL; 3591 3592 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, 3593 atio->u.isp24.fcp_hdr.s_id); 3594 if (!sess) { 3595 qlt_send_term_exchange(vha, NULL, atio, 1); 3596 return; 3597 } 3598 /* Sending marker isn't necessary, since we called from ISR */ 3599 3600 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); 3601 if (!pkt) { 3602 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06e, 3603 "qla_target(%d): %s failed: unable to allocate " 3604 "request packet", vha->vp_idx, __func__); 3605 return; 3606 } 3607 3608 pkt->entry_count = 1; 3609 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 3610 3611 ctio24 = (struct ctio7_to_24xx *)pkt; 3612 ctio24->entry_type = CTIO_TYPE7; 3613 ctio24->nport_handle = sess->loop_id; 3614 ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT); 3615 ctio24->vp_index = vha->vp_idx; 3616 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 3617 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 3618 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 3619 ctio24->exchange_addr = atio->u.isp24.exchange_addr; 3620 ctio24->u.status1.flags = (atio->u.isp24.attr << 9) | 3621 __constant_cpu_to_le16( 3622 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS | 3623 CTIO7_FLAGS_DONT_RET_CTIO); 3624 /* 3625 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it, 3626 * if the explicit conformation is used. 
 */
	ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
	ctio24->u.status1.scsi_status = cpu_to_le16(status);
	ctio24->u.status1.residual = get_unaligned((uint32_t *)
	    &atio->u.isp24.fcp_cmnd.add_cdb[
		atio->u.isp24.fcp_cmnd.add_cdb_len]);
	if (ctio24->u.status1.residual != 0)
		ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;

	qla2x00_start_iocbs(vha, vha->req);
}

/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	int rc;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf039,
		    "ATIO pkt, but no tgt (ha %p)", ha);
		return;
	}
	ql_dbg(ql_dbg_tgt, vha, 0xe02c,
	    "qla_target(%d): ATIO pkt %p: type %02x count %02x",
	    vha->vp_idx, atio, atio->u.raw.entry_type,
	    atio->u.raw.entry_count);
	/*
	 * In tgt_stop mode we should also allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	tgt->irq_cmd_count++;

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
		ql_dbg(ql_dbg_tgt, vha, 0xe02d,
		    "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, "
		    "add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n",
		    vha->vp_idx, atio->u.isp24.fcp_cmnd.lun,
		    atio->u.isp24.fcp_cmnd.rddata,
		    atio->u.isp24.fcp_cmnd.wrdata,
		    atio->u.isp24.fcp_cmnd.add_cdb_len,
		    be32_to_cpu(get_unaligned((uint32_t *)
			&atio->u.isp24.fcp_cmnd.add_cdb[
			    atio->u.isp24.fcp_cmnd.add_cdb_len])),
		    atio->u.isp24.fcp_hdr.s_id[0],
		    atio->u.isp24.fcp_hdr.s_id[1],
		    atio->u.isp24.fcp_hdr.s_id[2]);

		if (unlikely(atio->u.isp24.exchange_addr ==
		    ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe058,
			    "qla_target(%d): ATIO_TYPE7 "
			    "received with UNKNOWN exchange address, "
			    "sending QUEUE_FULL\n", vha->vp_idx);
			qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
			break;
		}
		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0))
			rc = qlt_handle_cmd_for_atio(vha, atio);
		else
			rc = qlt_handle_task_mgmt(vha, atio);
		if (unlikely(rc != 0)) {
			if (rc == -ESRCH) {
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
				qlt_send_busy(vha, atio, SAM_STAT_BUSY);
#else
				qlt_send_term_exchange(vha, NULL, atio, 1);
#endif
			} else {
				if (tgt->tgt_stop) {
					ql_dbg(ql_dbg_tgt, vha, 0xe059,
					    "qla_target: Unable to send "
					    "command to target for req, "
					    "ignoring.\n");
				} else {
					ql_dbg(ql_dbg_tgt, vha, 0xe05a,
					    "qla_target(%d): Unable to send "
					    "command to target, sending BUSY "
					    "status.\n", vha->vp_idx);
					qlt_send_busy(vha, atio, SAM_STAT_BUSY);
				}
			}
		}
		break;

	case IMMED_NOTIFY_TYPE:
	{
		if (unlikely(atio->u.isp2x.entry_status != 0)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05b,
			    "qla_target(%d): Received ATIO packet %x "
			    "with error status %x\n", vha->vp_idx,
			    atio->u.raw.entry_type,
			    atio->u.isp2x.entry_status);
			break;
		}
		ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe05c,
		    "qla_target(%d): Received unknown ATIO packet "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	tgt->irq_cmd_count--;
}

/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe05d,
		    "qla_target(%d): Response pkt %x received, but no "
		    "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
		return;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe02f,
	    "qla_target(%d): response pkt %p: T %02x C %02x S %02x "
	    "handle %#x\n", vha->vp_idx, pkt, pkt->entry_type,
	    pkt->entry_count, pkt->entry_status, pkt->handle);

	/*
	 * In tgt_stop mode we should also allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	tgt->irq_cmd_count++;

	switch (pkt->entry_type) {
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		ql_dbg(ql_dbg_tgt, vha, 0xe030, "CTIO_TYPE7: instance %d\n",
		    vha->vp_idx);
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case ACCEPT_TGT_IO_TYPE:
	{
		struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
		int rc;
		ql_dbg(ql_dbg_tgt, vha, 0xe031,
		    "ACCEPT_TGT_IO instance %d status %04x "
		    "lun %04x read/write %d data_length %04x "
		    "target_id %02x rx_id %04x\n", vha->vp_idx,
		    le16_to_cpu(atio->u.isp2x.status),
		    le16_to_cpu(atio->u.isp2x.lun),
		    atio->u.isp2x.execution_codes,
		    le32_to_cpu(atio->u.isp2x.data_length), GET_TARGET_ID(ha,
		    atio), atio->u.isp2x.rx_id);
		if (atio->u.isp2x.status !=
		    __constant_cpu_to_le16(ATIO_CDB_VALID)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05e,
			    "qla_target(%d): ATIO with error "
			    "status %x received\n", vha->vp_idx,
			    le16_to_cpu(atio->u.isp2x.status));
			break;
		}
		ql_dbg(ql_dbg_tgt, vha, 0xe032,
		    "FCP CDB: 0x%02x, sizeof(cdb): %lu",
		    atio->u.isp2x.cdb[0], (unsigned long
		    int)sizeof(atio->u.isp2x.cdb));

		rc = qlt_handle_cmd_for_atio(vha, atio);
		if (unlikely(rc != 0)) {
			if (rc == -ESRCH) {
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
				qlt_send_busy(vha, atio, 0);
#else
				qlt_send_term_exchange(vha, NULL, atio, 1);
#endif
			} else {
				if (tgt->tgt_stop) {
					ql_dbg(ql_dbg_tgt, vha, 0xe05f,
					    "qla_target: Unable to send "
					    "command to target, sending TERM "
					    "EXCHANGE for rsp\n");
					qlt_send_term_exchange(vha, NULL,
					    atio, 1);
				} else {
					ql_dbg(ql_dbg_tgt, vha, 0xe060,
					    "qla_target(%d): Unable to send "
					    "command to target, sending BUSY "
					    "status\n", vha->vp_idx);
					qlt_send_busy(vha, atio, 0);
				}
			}
		}
	}
	break;

	case CONTINUE_TGT_IO_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		ql_dbg(ql_dbg_tgt, vha, 0xe033,
		    "CONTINUE_TGT_IO: instance %d\n", vha->vp_idx);
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case CTIO_A64_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		ql_dbg(ql_dbg_tgt, vha, 0xe034, "CTIO_A64: instance %d\n",
		    vha->vp_idx);
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case IMMED_NOTIFY_TYPE:
		ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
		break;

	case NOTIFY_ACK_TYPE:
		if (tgt->notify_ack_expected > 0) {
			struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe036,
			    "NOTIFY_ACK seq %08x status %x\n",
			    le16_to_cpu(entry->u.isp2x.seq_id),
			    le16_to_cpu(entry->u.isp2x.status));
			tgt->notify_ack_expected--;
			if (entry->u.isp2x.status !=
			    __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe061,
				    "qla_target(%d): NOTIFY_ACK "
				    "failed %x\n", vha->vp_idx,
				    le16_to_cpu(entry->u.isp2x.status));
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe062,
			    "qla_target(%d): Unexpected NOTIFY_ACK received\n",
			    vha->vp_idx);
		}
		break;

	case ABTS_RECV_24XX:
		ql_dbg(ql_dbg_tgt, vha, 0xe037,
		    "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
		qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
		break;

	case ABTS_RESP_24XX:
		if (tgt->abts_resp_expected > 0) {
			struct abts_resp_from_24xx_fw *entry =
			    (struct abts_resp_from_24xx_fw *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe038,
			    "ABTS_RESP_24XX: compl_status %x\n",
			    entry->compl_status);
			tgt->abts_resp_expected--;
			if (le16_to_cpu(entry->compl_status) !=
			    ABTS_RESP_COMPL_SUCCESS) {
				if ((entry->error_subcode1 == 0x1E) &&
				    (entry->error_subcode2 == 0)) {
					/*
					 * We've got a race here: the aborted
					 * exchange was not terminated, i.e.
					 * the response for the aborted command
					 * was sent in the window between the
					 * abort request being received and
					 * being processed.
					 * Unfortunately, the firmware has a
					 * silly requirement that all aborted
					 * exchanges must be explicitly
					 * terminated, otherwise it refuses to
					 * send responses for the abort
					 * requests. So, we have to
					 * (re)terminate the exchange and retry
					 * the abort response.
					 */
					qlt_24xx_retry_term_exchange(vha,
					    entry);
				} else
					ql_dbg(ql_dbg_tgt, vha, 0xe063,
					    "qla_target(%d): ABTS_RESP_24XX "
					    "failed %x (subcode %x:%x)",
					    vha->vp_idx, entry->compl_status,
					    entry->error_subcode1,
					    entry->error_subcode2);
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe064,
			    "qla_target(%d): Unexpected ABTS_RESP_24XX "
			    "received\n", vha->vp_idx);
		}
		break;

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe065,
		    "qla_target(%d): Received unknown response pkt "
		    "type %x\n", vha->vp_idx, pkt->entry_type);
		break;
	}

	tgt->irq_cmd_count--;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire
 */
void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
	uint16_t *mailbox)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	int login_code;

	ql_dbg(ql_dbg_tgt, vha, 0xe039,
	    "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
	    vha->host_no, atomic_read(&vha->loop_state), vha->flags.init_done,
	    ha->operating_mode, ha->current_topology);

	if (!ha->tgt.tgt_ops)
		return;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe03a,
		    "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
		return;
	}

	if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
	    IS_QLA2100(ha))
		return;
	/*
	 * In tgt_stop mode we should also allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	tgt->irq_cmd_count++;

	switch (code) {
	case MBA_RESET:			/* Reset */
	case MBA_SYSTEM_ERR:		/* System Error */
	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
		    "qla_target(%d): System error async event %#x "
		    "occurred", vha->vp_idx, code);
		break;
	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up. */
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LOOP_UP:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
		    "qla_target(%d): Async LOOP_UP occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}
		break;
	}

	case MBA_LIP_OCCURRED:
	case MBA_LOOP_DOWN:
	case MBA_LIP_RESET:
	case MBA_RSCN_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
		    "qla_target(%d): Async event %#x occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		break;

	case MBA_PORT_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
		    "qla_target(%d): Port update async event %#x "
		    "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
		    "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));

		login_code = le16_to_cpu(mailbox[2]);
		if (login_code == 0x4)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
			    "Async MB 2: Got PLOGI Complete\n");
		else if (login_code == 0x7)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
			    "Async MB 2: Port Logged Out\n");
		break;

	default:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
		    "qla_target(%d): Async event %#x occurred: "
		    "ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		break;
	}

	tgt->irq_cmd_count--;
}

static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
	uint16_t loop_id)
{
	fc_port_t *fcport;
	int rc;

	fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
	if (!fcport) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
		    "qla_target(%d): Allocation of tmp FC port failed",
		    vha->vp_idx);
		return NULL;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf041, "loop_id %d", loop_id);

	fcport->loop_id = loop_id;

	rc = qla2x00_get_port_database(vha, fcport, 0);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
		    "qla_target(%d): Failed to retrieve fcport "
		    "information -- get_port_database() returned %x "
		    "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
		kfree(fcport);
		return NULL;
	}

	return fcport;
}

/* Must be called under tgt_mutex */
static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
	uint8_t *s_id)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	fc_port_t *fcport = NULL;
	int rc, global_resets;
	uint16_t loop_id = 0;

retry:
	global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);

	rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
	if (rc != 0) {
		if ((s_id[0] == 0xFF) &&
		    (s_id[1] == 0xFC)) {
			/*
			 * This is Domain Controller, so it should be
			 * OK to drop SCSI commands from it.
			 */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
			    "Unable to find initiator with S_ID %x:%x:%x",
			    s_id[0], s_id[1], s_id[2]);
		} else
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071,
			    "qla_target(%d): Unable to find "
			    "initiator with S_ID %x:%x:%x",
			    vha->vp_idx, s_id[0], s_id[1],
			    s_id[2]);
		return NULL;
	}

	fcport = qlt_get_port_database(vha, loop_id);
	if (!fcport)
		return NULL;

	if (global_resets !=
	    atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
		    "qla_target(%d): global reset during session discovery "
		    "(counter was %d, new %d), retrying", vha->vp_idx,
		    global_resets,
		    atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
		goto retry;
	}

	sess = qlt_create_sess(vha, fcport, true);

	kfree(fcport);
	return sess;
}

static void qlt_abort_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	unsigned long flags;
	uint8_t s_id[3];
	int rc;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (tgt->tgt_stop)
		goto out_term;

	s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
	s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
	s_id[2] = prm->abts.fcp_hdr_le.s_id[0];

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		mutex_lock(&ha->tgt.tgt_mutex);
		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */
		mutex_unlock(&ha->tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (!sess)
			goto out_term;
	} else {
		kref_get(&sess->se_sess->sess_kref);
	}

	if (tgt->tgt_stop)
		goto out_term;

	rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
	if (rc != 0)
		goto out_term;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ha->tgt.tgt_ops->put_sess(sess);
	return;

out_term:
	qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
}

static void qlt_tmr_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct atio_from_isp *a = &prm->tm_iocb2;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	unsigned long flags;
	uint8_t *s_id = NULL; /* to hide compiler warnings */
	int rc;
	uint32_t lun, unpacked_lun;
	int lun_size, fn;
	void *iocb;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (tgt->tgt_stop)
		goto out_term;

	s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		mutex_lock(&ha->tgt.tgt_mutex);
		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */
		mutex_unlock(&ha->tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (!sess)
			goto out_term;
	} else {
		kref_get(&sess->se_sess->sess_kref);
	}

	iocb = a;
	lun = a->u.isp24.fcp_cmnd.lun;
	lun_size = sizeof(lun);
	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
	if (rc != 0)
		goto out_term;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ha->tgt.tgt_ops->put_sess(sess);
	return;

out_term:
	qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
}

static void qlt_sess_work_fn(struct work_struct *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
	struct scsi_qla_host *vha = tgt->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		struct qla_tgt_sess_work_param *prm = list_entry(
		    tgt->sess_works_list.next, typeof(*prm),
		    sess_works_list_entry);

		/*
		 * This work can be scheduled on several CPUs at a time, so we
		 * must delete the entry to eliminate double processing
		 */
		list_del(&prm->sess_works_list_entry);

		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

		switch (prm->type) {
		case QLA_TGT_SESS_WORK_ABORT:
			qlt_abort_work(tgt, prm);
			break;
		case QLA_TGT_SESS_WORK_TM:
			qlt_tmr_work(tgt, prm);
			break;
		default:
			BUG_ON(1);
			break;
		}

		spin_lock_irqsave(&tgt->sess_work_lock, flags);

		kfree(prm);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
}

/* Must be called under tgt_host_action_mutex */
int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
{
	struct qla_tgt *tgt;

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	if (!IS_TGT_MODE_CAPABLE(ha)) {
		ql_log(ql_log_warn, base_vha, 0xe070,
		    "This adapter does not support target mode.\n");
		return 0;
	}

	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
	    "Registering target for host %ld(%p)", base_vha->host_no, ha);

	BUG_ON((ha->tgt.qla_tgt != NULL) || (ha->tgt.tgt_ops != NULL));

	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
	if (!tgt) {
		ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
		    "Unable to allocate struct qla_tgt\n");
		return -ENOMEM;
	}

	if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
		base_vha->host->hostt->supported_mode |= MODE_TARGET;

	tgt->ha = ha;
	tgt->vha = base_vha;
	init_waitqueue_head(&tgt->waitQ);
	INIT_LIST_HEAD(&tgt->sess_list);
	INIT_LIST_HEAD(&tgt->del_sess_list);
	INIT_DELAYED_WORK(&tgt->sess_del_work,
	    (void (*)(struct work_struct *))qlt_del_sess_work_fn);
	spin_lock_init(&tgt->sess_work_lock);
	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
	INIT_LIST_HEAD(&tgt->sess_works_list);
	spin_lock_init(&tgt->srr_lock);
	INIT_LIST_HEAD(&tgt->srr_ctio_list);
	INIT_LIST_HEAD(&tgt->srr_imm_list);
	INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
	atomic_set(&tgt->tgt_global_resets_count, 0);

	ha->tgt.qla_tgt = tgt;

	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
	    "qla_target(%d): using 64 Bit PCI addressing",
	    base_vha->vp_idx);
	tgt->tgt_enable_64bit_addr = 1;
	/* 3 is reserved */
	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
	tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
	tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;

	mutex_lock(&qla_tgt_mutex);
	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
	mutex_unlock(&qla_tgt_mutex);

	return 0;
}

/* Must be called under tgt_host_action_mutex */
int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
{
	if (!ha->tgt.qla_tgt)
		return 0;

	mutex_lock(&qla_tgt_mutex);
	list_del(&ha->tgt.qla_tgt->tgt_list_entry);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
	    vha->host_no, ha);
	qlt_release(ha->tgt.qla_tgt);

	return 0;
}

static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
	unsigned char *b)
{
	int i;

	pr_debug("qla2xxx HW vha->node_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->node_name[i]);
	pr_debug("\n");
	pr_debug("qla2xxx HW vha->port_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->port_name[i]);
	pr_debug("\n");

	pr_debug("qla2xxx passed configfs WWPN: ");
	put_unaligned_be64(wwpn, b);
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", b[i]);
	pr_debug("\n");
}

/**
 * qlt_lport_register - register lport with external module
 *
 * @qla_tgt_ops: Pointer for tcm_qla2xxx qla_tgt_ops
 * @wwpn: Passed FC target WWPN
 * @callback: lport initialization callback for tcm_qla2xxx code
 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
 */
int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
	int (*callback)(struct scsi_qla_host *), void *target_lport_ptr)
{
	struct qla_tgt *tgt;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct Scsi_Host *host;
	unsigned long flags;
	int rc;
	u8 b[WWN_SIZE];

	mutex_lock(&qla_tgt_mutex);
	list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
		vha = tgt->vha;
		ha = vha->hw;

		host = vha->host;
		if (!host)
			continue;

		if (ha->tgt.tgt_ops != NULL)
			continue;

		if (!(host->hostt->supported_mode & MODE_TARGET))
			continue;

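		/*
		 * active_mode is updated under ha->hardware_lock by
		 * qlt_set_mode()/qlt_clear_mode() below, so take the same
		 * lock for a stable view before skipping hosts that are
		 * already running in target mode.
		 */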
		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (host->active_mode & MODE_TARGET) {
			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		if (!scsi_host_get(host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe068,
			    "Unable to scsi_host_get() for"
			    " qla2xxx scsi_host\n");
			continue;
		}
		qlt_lport_dump(vha, wwpn, b);

		if (memcmp(vha->port_name, b, WWN_SIZE)) {
			scsi_host_put(host);
			continue;
		}
		/*
		 * Setup passed parameters ahead of invoking callback
		 */
		ha->tgt.tgt_ops = qla_tgt_ops;
		ha->tgt.target_lport_ptr = target_lport_ptr;
		rc = (*callback)(vha);
		if (rc != 0) {
			ha->tgt.tgt_ops = NULL;
			ha->tgt.target_lport_ptr = NULL;
		}
		mutex_unlock(&qla_tgt_mutex);
		return rc;
	}
	mutex_unlock(&qla_tgt_mutex);

	return -ENODEV;
}
EXPORT_SYMBOL(qlt_lport_register);

/**
 * qlt_lport_deregister - Deregister lport
 *
 * @vha: Registered scsi_qla_host pointer
 */
void qlt_lport_deregister(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct Scsi_Host *sh = vha->host;
	/*
	 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
	 */
	ha->tgt.target_lport_ptr = NULL;
	ha->tgt.tgt_ops = NULL;
	/*
	 * Release the Scsi_Host reference for the underlying qla2xxx host
	 */
	scsi_host_put(sh);
}
EXPORT_SYMBOL(qlt_lport_deregister);

/* Must be called under HW lock */
void qlt_set_mode(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_TARGET;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode |= MODE_TARGET;
		break;
	default:
		break;
	}

	if (ha->tgt.ini_mode_force_reverse)
		qla_reverse_ini_mode(vha);
}

/* Must be called under HW lock */
void qlt_clear_mode(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
		vha->host->active_mode = MODE_UNKNOWN;
		break;
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode &= ~MODE_TARGET;
		break;
	default:
		break;
	}

	if (ha->tgt.ini_mode_force_reverse)
		qla_reverse_ini_mode(vha);
}

/*
 * qlt_enable_vha - NO LOCK HELD
 *
 * host_reset, bring up w/ Target Mode Enabled
 */
void
qlt_enable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe069,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stopped = 0;
	qlt_set_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_hba_online(vha);
}
EXPORT_SYMBOL(qlt_enable_vha);

/*
 * qlt_disable_vha - NO LOCK HELD
 *
 * Disable Target Mode and reset the adapter
 */
void
qlt_disable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = ha->tgt.qla_tgt;
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe06a,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_clear_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_hba_online(vha);
}

/*
 * Called from qla_init.c:qla24xx_vport_create() context to setup
 * the target mode specific struct scsi_qla_host and struct qla_hw_data
 * members.
 */
void
qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
{
	if (!qla_tgt_mode_enabled(vha))
		return;

	mutex_init(&ha->tgt.tgt_mutex);
	mutex_init(&ha->tgt.tgt_host_action_mutex);

	qlt_clear_mode(vha);

	/*
	 * NOTE: Currently the value is kept the same for <24xx and
	 * >=24xx ISPs. If it is necessary to change it,
	 * the check should be added for specific ISPs,
	 * assigning the value appropriately.
	 */
	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
}

void
qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
{
	/*
	 * FC-4 Feature bit 0 indicates target functionality to the name server.
	 */
	if (qla_tgt_mode_enabled(vha)) {
		if (qla_ini_mode_enabled(vha))
			ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
		else
			ct_req->req.rff_id.fc4_feature = BIT_0;
	} else if (qla_ini_mode_enabled(vha)) {
		ct_req->req.rff_id.fc4_feature = BIT_1;
	}
}

/*
 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
 * @vha: HA context
 *
 * Beginning of ATIO ring has initialization control block already built
 * by nvram config routine.
 */
void
qlt_init_atio_q_entries(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t cnt;
	struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;

	if (!qla_tgt_mode_enabled(vha))
		return;

	for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
		pkt->u.raw.signature = ATIO_PROCESSED;
		pkt++;
	}

}

/*
 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
 * @vha: SCSI driver HA context
 */
void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *pkt;
	int cnt, i;

	if (!vha->flags.online)
		return;

	while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		cnt = pkt->u.raw.entry_count;

		qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt);

		for (i = 0; i < cnt; i++) {
			ha->tgt.atio_ring_index++;
			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
				ha->tgt.atio_ring_index = 0;
				ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
			} else
				ha->tgt.atio_ring_ptr++;

			pkt->u.raw.signature = ATIO_PROCESSED;
			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		}
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
}

void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	if (!QLA_TGT_MODE_ENABLED())
		return;

	WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
	RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));

	if (IS_ATIO_MSIX_CAPABLE(ha)) {
		struct qla_msix_entry *msix = &ha->msix_entries[2];
		struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;

		icb->msix_atio = cpu_to_le16(msix->entry);
		ql_dbg(ql_dbg_init, vha, 0xf072,
		    "Registering ICB vector 0x%x for atio queue.\n",
		    msix->entry);
	}
}

void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	if (qla_tgt_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		nv->exchange_count = __constant_cpu_to_le16(0xFFFF);

		/* Enable target mode */
		nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (!qla_ini_mode_enabled(vha))
			nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
		/* Enable FC tapes support */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
		/* Disable Full Login after LIP */
		nv->host_p &= __constant_cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* out-of-order frames reassembly */
	nv->firmware_options_3 |= BIT_6|BIT_9;

	if (ha->tgt.enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
			    FC_COS_CLASS2 | FC_COS_CLASS3;

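		/*
		 * firmware_options_2 BIT_8 appears to toggle Class 2 service
		 * support: set here when enable_class_2 is requested and
		 * cleared in the else branch below.
		 */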
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
	}
}

void
qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_24xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
	}
}

void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		nv->exchange_count = __constant_cpu_to_le16(0xFFFF);

		/* Enable target mode */
		nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (!qla_ini_mode_enabled(vha))
			nv->firmware_options_1 |=
			    __constant_cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
		/* Enable FC tapes support */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
		/* Disable Full Login after LIP */
		nv->host_p &= __constant_cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* out-of-order frames reassembly */
	nv->firmware_options_3 |= BIT_6|BIT_9;

	if (ha->tgt.enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
			    FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
	}
}

void
qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_81xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
	}
}

void
qlt_83xx_iospace_config(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	ha->msix_count += 1; /* For ATIO Q */
}

int
qlt_24xx_process_response_error(struct scsi_qla_host *vha,
	struct sts_entry_24xx *pkt)
{
	switch (pkt->entry_type) {
	case ABTS_RECV_24XX:
	case ABTS_RESP_24XX:
	case CTIO_TYPE7:
	case NOTIFY_ACK_TYPE:
		return 1;
	default:
		return 0;
	}
}

void
qlt_modify_vp_config(struct scsi_qla_host *vha,
	struct vp_config_entry_24xx *vpmod)
{
	if (qla_tgt_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_5;
	/* Disable ini mode, if requested */
	if (!qla_ini_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_4;
}

void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->mqenable || IS_QLA83XX(ha)) {
		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
	} else {
		ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
	}

	mutex_init(&ha->tgt.tgt_mutex);
	mutex_init(&ha->tgt.tgt_host_action_mutex);
	qlt_clear_mode(base_vha);
}

irqreturn_t
qla83xx_msix_atio_q(int irq, void *dev_id)
{
	struct rsp_que *rsp;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	ha = rsp->hw;
	vha = pci_get_drvdata(ha->pdev);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	qlt_24xx_process_atio_queue(vha);
	qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

int
qlt_mem_alloc(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
	    MAX_MULTI_ID_FABRIC, GFP_KERNEL);
	if (!ha->tgt.tgt_vp_map)
		return -ENOMEM;

	ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
	    (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
	    &ha->tgt.atio_dma, GFP_KERNEL);
	if (!ha->tgt.atio_ring) {
		kfree(ha->tgt.tgt_vp_map);
		return -ENOMEM;
	}
	return 0;
}

void
qlt_mem_free(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.atio_ring) {
		dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
		    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
		    ha->tgt.atio_dma);
	}
	kfree(ha->tgt.tgt_vp_map);
}

/* vport_slock to be held by the caller */
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	switch (cmd) {
	case SET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
		break;
	case SET_AL_PA:
		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
		break;
	case RESET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
		break;
	case RESET_AL_PA:
		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
		break;
	}
}

static int __init qlt_parse_ini_mode(void)
{
	if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
	else
		return false;

	return true;
}

int __init
qlt_init(void)
{
	int ret;

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep",
	    sizeof(struct qla_tgt_cmd), __alignof__(struct qla_tgt_cmd), 0,
	    NULL);
	if (!qla_tgt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06c,
		    "kmem_cache_create for qla_tgt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
	    qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		ret = -ENOMEM;
		goto out;
	}

	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
out:
	kmem_cache_destroy(qla_tgt_cmd_cachep);
	return ret;
}

void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	kmem_cache_destroy(qla_tgt_cmd_cachep);
}
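/*
 * Illustrative call flow for the entry points above -- a sketch only, not
 * code from this file. The real callers live in the qla2xxx init paths and
 * in the tcm_qla2xxx fabric module; "tcm_qla2xxx_template", "callback" and
 * "lport" below are made-up names for this example:
 *
 *	qlt_init();                      module load; returns 1 when
 *	                                 qlini_mode=disabled, 0 otherwise
 *	qlt_add_target(ha, base_vha);    per-adapter registration
 *	qlt_lport_register(&tcm_qla2xxx_template, wwpn, callback, lport);
 *	                                 fabric module claims the matching WWPN
 *	qlt_enable_vha(vha);             bring the port up in target mode
 *	...                              I/O arrives via the qlt_24xx_atio_pkt()
 *	                                 and qlt_response_pkt() callbacks
 *	qlt_disable_vha(vha);
 *	qlt_lport_deregister(vha);
 *	qlt_remove_target(ha, vha);
 *	qlt_exit();                      module unload
 */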