// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
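
/*
 * Worked example for the sizing helpers above: a command with 10 data
 * segments fits 2 DSDs in the Command Type 3 IOCB itself, leaving 8 DSDs
 * for Continuation Type 1 IOCBs at 5 DSDs each, so qla2x00_calc_iocbs_64(10)
 * returns 1 + 8/5 + 1 = 3 entries; likewise qla2x00_calc_iocbs_32(10)
 * returns 1 + 7/7 = 2 entries (3 DSDs inline, 7 per Continuation Type 0).
 */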

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
			   CONTINUE_A64_TYPE, &cont_pkt->entry_type);

	return (cont_pkt);
}

inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	/* We always use DIF Bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (cmd->prot_flags & SCSI_PROT_IP_CHECKSUM)
			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
		else
			*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	if (!(cmd->prot_flags & SCSI_PROT_GUARD_CHECK))
		*fw_prot_opts |= PO_DISABLE_GUARD_CHECK;

	return scsi_prot_sg_count(cmd);
}

/*
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	struct dsd32 *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
	cur_dsd = cmd_pkt->dsd32;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd32(&cur_dsd, sg);
		avail_dsds--;
	}
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
243 * 244 * @sp: SRB command to process 245 * @cmd_pkt: Command type 3 IOCB 246 * @tot_dsds: Total number of segments to transfer 247 */ 248 void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt, 249 uint16_t tot_dsds) 250 { 251 uint16_t avail_dsds; 252 struct dsd64 *cur_dsd; 253 scsi_qla_host_t *vha; 254 struct scsi_cmnd *cmd; 255 struct scatterlist *sg; 256 int i; 257 258 cmd = GET_CMD_SP(sp); 259 260 /* Update entry type to indicate Command Type 3 IOCB */ 261 put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type); 262 263 /* No data transfer */ 264 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { 265 cmd_pkt->byte_count = cpu_to_le32(0); 266 return; 267 } 268 269 vha = sp->vha; 270 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 271 272 /* Two DSDs are available in the Command Type 3 IOCB */ 273 avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64); 274 cur_dsd = cmd_pkt->dsd64; 275 276 /* Load data segments */ 277 scsi_for_each_sg(cmd, sg, tot_dsds, i) { 278 cont_a64_entry_t *cont_pkt; 279 280 /* Allocate additional continuation packets? */ 281 if (avail_dsds == 0) { 282 /* 283 * Five DSDs are available in the Continuation 284 * Type 1 IOCB. 285 */ 286 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); 287 cur_dsd = cont_pkt->dsd; 288 avail_dsds = ARRAY_SIZE(cont_pkt->dsd); 289 } 290 291 append_dsd64(&cur_dsd, sg); 292 avail_dsds--; 293 } 294 } 295 296 /* 297 * Find the first handle that is not in use, starting from 298 * req->current_outstanding_cmd + 1. The caller must hold the lock that is 299 * associated with @req. 300 */ 301 uint32_t qla2xxx_get_next_handle(struct req_que *req) 302 { 303 uint32_t index, handle = req->current_outstanding_cmd; 304 305 for (index = 1; index < req->num_outstanding_cmds; index++) { 306 handle++; 307 if (handle == req->num_outstanding_cmds) 308 handle = 1; 309 if (!req->outstanding_cmds[handle]) 310 return handle; 311 } 312 313 return 0; 314 } 315 316 /** 317 * qla2x00_start_scsi() - Send a SCSI command to the ISP 318 * @sp: command to send to the ISP 319 * 320 * Returns non-zero if a failure occurred, else zero. 321 */ 322 int 323 qla2x00_start_scsi(srb_t *sp) 324 { 325 int nseg; 326 unsigned long flags; 327 scsi_qla_host_t *vha; 328 struct scsi_cmnd *cmd; 329 uint32_t *clr_ptr; 330 uint32_t handle; 331 cmd_entry_t *cmd_pkt; 332 uint16_t cnt; 333 uint16_t req_cnt; 334 uint16_t tot_dsds; 335 struct device_reg_2xxx __iomem *reg; 336 struct qla_hw_data *ha; 337 struct req_que *req; 338 struct rsp_que *rsp; 339 340 /* Setup device pointers. 
*/ 341 vha = sp->vha; 342 ha = vha->hw; 343 reg = &ha->iobase->isp; 344 cmd = GET_CMD_SP(sp); 345 req = ha->req_q_map[0]; 346 rsp = ha->rsp_q_map[0]; 347 /* So we know we haven't pci_map'ed anything yet */ 348 tot_dsds = 0; 349 350 /* Send marker if required */ 351 if (vha->marker_needed != 0) { 352 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) != 353 QLA_SUCCESS) { 354 return (QLA_FUNCTION_FAILED); 355 } 356 vha->marker_needed = 0; 357 } 358 359 /* Acquire ring specific lock */ 360 spin_lock_irqsave(&ha->hardware_lock, flags); 361 362 handle = qla2xxx_get_next_handle(req); 363 if (handle == 0) 364 goto queuing_error; 365 366 /* Map the sg table so we have an accurate count of sg entries needed */ 367 if (scsi_sg_count(cmd)) { 368 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), 369 scsi_sg_count(cmd), cmd->sc_data_direction); 370 if (unlikely(!nseg)) 371 goto queuing_error; 372 } else 373 nseg = 0; 374 375 tot_dsds = nseg; 376 377 /* Calculate the number of request entries needed. */ 378 req_cnt = ha->isp_ops->calc_req_entries(tot_dsds); 379 if (req->cnt < (req_cnt + 2)) { 380 cnt = rd_reg_word_relaxed(ISP_REQ_Q_OUT(ha, reg)); 381 if (req->ring_index < cnt) 382 req->cnt = cnt - req->ring_index; 383 else 384 req->cnt = req->length - 385 (req->ring_index - cnt); 386 /* If still no head room then bail out */ 387 if (req->cnt < (req_cnt + 2)) 388 goto queuing_error; 389 } 390 391 /* Build command packet */ 392 req->current_outstanding_cmd = handle; 393 req->outstanding_cmds[handle] = sp; 394 sp->handle = handle; 395 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 396 req->cnt -= req_cnt; 397 398 cmd_pkt = (cmd_entry_t *)req->ring_ptr; 399 cmd_pkt->handle = handle; 400 /* Zero out remaining portion of packet. */ 401 clr_ptr = (uint32_t *)cmd_pkt + 2; 402 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 403 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 404 405 /* Set target ID and LUN number*/ 406 SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id); 407 cmd_pkt->lun = cpu_to_le16(cmd->device->lun); 408 cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG); 409 410 /* Load SCSI command packet. */ 411 memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len); 412 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 413 414 /* Build IOCB segments */ 415 ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds); 416 417 /* Set total data segment count. */ 418 cmd_pkt->entry_count = (uint8_t)req_cnt; 419 wmb(); 420 421 /* Adjust ring index. */ 422 req->ring_index++; 423 if (req->ring_index == req->length) { 424 req->ring_index = 0; 425 req->ring_ptr = req->ring; 426 } else 427 req->ring_ptr++; 428 429 sp->flags |= SRB_DMA_VALID; 430 431 /* Set chip new ring index. */ 432 wrt_reg_word(ISP_REQ_Q_IN(ha, reg), req->ring_index); 433 rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */ 434 435 /* Manage unprocessed RIO/ZIO commands in response queue. 
 */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);

	if (IS_P3P_TYPE(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			wrt_reg_dword(req->req_q_in, req->ring_index);
		} else if (IS_QLA83XX(ha)) {
			wrt_reg_dword(req->req_q_in, req->ring_index);
			rd_reg_dword_relaxed(&ha->iobase->isp24.hccr);
		} else if (IS_QLAFX00(ha)) {
			wrt_reg_dword(&reg->ispfx00.req_q_in, req->ring_index);
			rd_reg_dword_relaxed(&reg->ispfx00.req_q_in);
			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
		} else if (IS_FWI2_CAPABLE(ha)) {
			wrt_reg_dword(&reg->isp24.req_q_in, req->ring_index);
			rd_reg_dword_relaxed(&reg->isp24.req_q_in);
		} else {
			wrt_reg_word(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}
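
/*
 * Note: marker IOCBs travel through the same request rings as regular
 * commands.  The *_start_scsi() paths in this file queue an MK_SYNC_ALL
 * marker first whenever vha->marker_needed is set (e.g. after link or
 * adapter reset events), so the firmware can resynchronize its command
 * state before new I/O is issued.
 */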
/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @qpair: queue pair pointer
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24 = NULL;
	struct req_que *req = qpair->req;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk24 = (struct mrk_entry_24xx *)mrk;

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24->nport_handle = cpu_to_le16(loop_id);
			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16((uint16_t)lun);
		}
	}

	if (IS_FWI2_CAPABLE(ha))
		mrk24->handle = QLA_SKIP_HANDLE;

	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
	if (ha_locked) {
		if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	} else {
		if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	}
	vha->marker_needed = 0;

	return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
    uint16_t tot_dsds)
{
	struct dsd64 *cur_dsd = NULL, *next_dsd;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;
	struct qla_qpair *qpair = sp->qpair;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 6 IOCB */
	put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE ||
	    tot_dsds == 0) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return 0;
	}

	vha = sp->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		qpair->counters.output_bytes += scsi_bufflen(cmd);
		qpair->counters.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		qpair->counters.input_bytes += scsi_bufflen(cmd);
		qpair->counters.input_requests++;
	}

	cur_seg = scsi_sglist(cmd);
	ctx = &sp->u.scmd.ct6_ctx;

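	/*
	 * Command Type 6 IOCBs carry no inline data DSDs.  The data segments
	 * are instead written into externally allocated DSD lists taken from
	 * ha->gbl_dsd_list below: the first list is referenced by the IOCB's
	 * fcp_dsd descriptor, and each subsequent list is chained in through
	 * the spare last descriptor slot of the previous list.
	 */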
	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			first_iocb = 0;
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
			    &cmd_pkt->fcp_dsd.address);
			cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
		} else {
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
			    &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(dsd_list_len);
			cur_dsd++;
		}
		cur_dsd = next_dsd;
		while (avail_dsds) {
			append_dsd64(&cur_dsd, cur_seg);
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
	return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds / QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds, struct req_que *req)
{
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct qla_qpair *qpair = sp->qpair;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 7 IOCB */
	put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
		qpair->counters.output_bytes += scsi_bufflen(cmd);
		qpair->counters.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
		qpair->counters.input_bytes += scsi_bufflen(cmd);
		qpair->counters.input_requests++;
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->dsd;

	/* Load data segments */

	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
754 */ 755 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req); 756 cur_dsd = cont_pkt->dsd; 757 avail_dsds = ARRAY_SIZE(cont_pkt->dsd); 758 } 759 760 append_dsd64(&cur_dsd, sg); 761 avail_dsds--; 762 } 763 } 764 765 struct fw_dif_context { 766 __le32 ref_tag; 767 __le16 app_tag; 768 uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/ 769 uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/ 770 }; 771 772 /* 773 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command 774 * 775 */ 776 static inline void 777 qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt, 778 unsigned int protcnt) 779 { 780 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 781 782 pkt->ref_tag = cpu_to_le32(scsi_prot_ref_tag(cmd)); 783 784 if (cmd->prot_flags & SCSI_PROT_REF_CHECK && 785 qla2x00_hba_err_chk_enabled(sp)) { 786 pkt->ref_tag_mask[0] = 0xff; 787 pkt->ref_tag_mask[1] = 0xff; 788 pkt->ref_tag_mask[2] = 0xff; 789 pkt->ref_tag_mask[3] = 0xff; 790 } 791 792 pkt->app_tag = cpu_to_le16(0); 793 pkt->app_tag_mask[0] = 0x0; 794 pkt->app_tag_mask[1] = 0x0; 795 } 796 797 int 798 qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx, 799 uint32_t *partial) 800 { 801 struct scatterlist *sg; 802 uint32_t cumulative_partial, sg_len; 803 dma_addr_t sg_dma_addr; 804 805 if (sgx->num_bytes == sgx->tot_bytes) 806 return 0; 807 808 sg = sgx->cur_sg; 809 cumulative_partial = sgx->tot_partial; 810 811 sg_dma_addr = sg_dma_address(sg); 812 sg_len = sg_dma_len(sg); 813 814 sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed; 815 816 if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) { 817 sgx->dma_len = (blk_sz - cumulative_partial); 818 sgx->tot_partial = 0; 819 sgx->num_bytes += blk_sz; 820 *partial = 0; 821 } else { 822 sgx->dma_len = sg_len - sgx->bytes_consumed; 823 sgx->tot_partial += sgx->dma_len; 824 *partial = 1; 825 } 826 827 sgx->bytes_consumed += sgx->dma_len; 828 829 if (sg_len == sgx->bytes_consumed) { 830 sg = sg_next(sg); 831 sgx->num_sg++; 832 sgx->cur_sg = sg; 833 sgx->bytes_consumed = 0; 834 } 835 836 return 1; 837 } 838 839 int 840 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, 841 struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc) 842 { 843 void *next_dsd; 844 uint8_t avail_dsds = 0; 845 uint32_t dsd_list_len; 846 struct dsd_dma *dsd_ptr; 847 struct scatterlist *sg_prot; 848 struct dsd64 *cur_dsd = dsd; 849 uint16_t used_dsds = tot_dsds; 850 uint32_t prot_int; /* protection interval */ 851 uint32_t partial; 852 struct qla2_sgx sgx; 853 dma_addr_t sle_dma; 854 uint32_t sle_dma_len, tot_prot_dma_len = 0; 855 struct scsi_cmnd *cmd; 856 857 memset(&sgx, 0, sizeof(struct qla2_sgx)); 858 if (sp) { 859 cmd = GET_CMD_SP(sp); 860 prot_int = scsi_prot_interval(cmd); 861 862 sgx.tot_bytes = scsi_bufflen(cmd); 863 sgx.cur_sg = scsi_sglist(cmd); 864 sgx.sp = sp; 865 866 sg_prot = scsi_prot_sglist(cmd); 867 } else if (tc) { 868 prot_int = tc->blk_sz; 869 sgx.tot_bytes = tc->bufflen; 870 sgx.cur_sg = tc->sg; 871 sg_prot = tc->prot_sg; 872 } else { 873 BUG(); 874 return 1; 875 } 876 877 while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) { 878 879 sle_dma = sgx.dma_addr; 880 sle_dma_len = sgx.dma_len; 881 alloc_and_fill: 882 /* Allocate additional continuation packets? */ 883 if (avail_dsds == 0) { 884 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 
885 QLA_DSDS_PER_IOCB : used_dsds; 886 dsd_list_len = (avail_dsds + 1) * 12; 887 used_dsds -= avail_dsds; 888 889 /* allocate tracking DS */ 890 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); 891 if (!dsd_ptr) 892 return 1; 893 894 /* allocate new list */ 895 dsd_ptr->dsd_addr = next_dsd = 896 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, 897 &dsd_ptr->dsd_list_dma); 898 899 if (!next_dsd) { 900 /* 901 * Need to cleanup only this dsd_ptr, rest 902 * will be done by sp_free_dma() 903 */ 904 kfree(dsd_ptr); 905 return 1; 906 } 907 908 if (sp) { 909 list_add_tail(&dsd_ptr->list, 910 &sp->u.scmd.crc_ctx->dsd_list); 911 912 sp->flags |= SRB_CRC_CTX_DSD_VALID; 913 } else { 914 list_add_tail(&dsd_ptr->list, 915 &(tc->ctx->dsd_list)); 916 *tc->ctx_dsd_alloced = 1; 917 } 918 919 920 /* add new list to cmd iocb or last list */ 921 put_unaligned_le64(dsd_ptr->dsd_list_dma, 922 &cur_dsd->address); 923 cur_dsd->length = cpu_to_le32(dsd_list_len); 924 cur_dsd = next_dsd; 925 } 926 put_unaligned_le64(sle_dma, &cur_dsd->address); 927 cur_dsd->length = cpu_to_le32(sle_dma_len); 928 cur_dsd++; 929 avail_dsds--; 930 931 if (partial == 0) { 932 /* Got a full protection interval */ 933 sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len; 934 sle_dma_len = 8; 935 936 tot_prot_dma_len += sle_dma_len; 937 if (tot_prot_dma_len == sg_dma_len(sg_prot)) { 938 tot_prot_dma_len = 0; 939 sg_prot = sg_next(sg_prot); 940 } 941 942 partial = 1; /* So as to not re-enter this block */ 943 goto alloc_and_fill; 944 } 945 } 946 /* Null termination */ 947 cur_dsd->address = 0; 948 cur_dsd->length = 0; 949 cur_dsd++; 950 return 0; 951 } 952 953 int 954 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, 955 struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc) 956 { 957 void *next_dsd; 958 uint8_t avail_dsds = 0; 959 uint32_t dsd_list_len; 960 struct dsd_dma *dsd_ptr; 961 struct scatterlist *sg, *sgl; 962 struct dsd64 *cur_dsd = dsd; 963 int i; 964 uint16_t used_dsds = tot_dsds; 965 struct scsi_cmnd *cmd; 966 967 if (sp) { 968 cmd = GET_CMD_SP(sp); 969 sgl = scsi_sglist(cmd); 970 } else if (tc) { 971 sgl = tc->sg; 972 } else { 973 BUG(); 974 return 1; 975 } 976 977 978 for_each_sg(sgl, sg, tot_dsds, i) { 979 /* Allocate additional continuation packets? */ 980 if (avail_dsds == 0) { 981 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 
982 QLA_DSDS_PER_IOCB : used_dsds; 983 dsd_list_len = (avail_dsds + 1) * 12; 984 used_dsds -= avail_dsds; 985 986 /* allocate tracking DS */ 987 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); 988 if (!dsd_ptr) 989 return 1; 990 991 /* allocate new list */ 992 dsd_ptr->dsd_addr = next_dsd = 993 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, 994 &dsd_ptr->dsd_list_dma); 995 996 if (!next_dsd) { 997 /* 998 * Need to cleanup only this dsd_ptr, rest 999 * will be done by sp_free_dma() 1000 */ 1001 kfree(dsd_ptr); 1002 return 1; 1003 } 1004 1005 if (sp) { 1006 list_add_tail(&dsd_ptr->list, 1007 &sp->u.scmd.crc_ctx->dsd_list); 1008 1009 sp->flags |= SRB_CRC_CTX_DSD_VALID; 1010 } else { 1011 list_add_tail(&dsd_ptr->list, 1012 &(tc->ctx->dsd_list)); 1013 *tc->ctx_dsd_alloced = 1; 1014 } 1015 1016 /* add new list to cmd iocb or last list */ 1017 put_unaligned_le64(dsd_ptr->dsd_list_dma, 1018 &cur_dsd->address); 1019 cur_dsd->length = cpu_to_le32(dsd_list_len); 1020 cur_dsd = next_dsd; 1021 } 1022 append_dsd64(&cur_dsd, sg); 1023 avail_dsds--; 1024 1025 } 1026 /* Null termination */ 1027 cur_dsd->address = 0; 1028 cur_dsd->length = 0; 1029 cur_dsd++; 1030 return 0; 1031 } 1032 1033 int 1034 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, 1035 struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc) 1036 { 1037 struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd; 1038 struct scatterlist *sg, *sgl; 1039 struct crc_context *difctx = NULL; 1040 struct scsi_qla_host *vha; 1041 uint dsd_list_len; 1042 uint avail_dsds = 0; 1043 uint used_dsds = tot_dsds; 1044 bool dif_local_dma_alloc = false; 1045 bool direction_to_device = false; 1046 int i; 1047 1048 if (sp) { 1049 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1050 1051 sgl = scsi_prot_sglist(cmd); 1052 vha = sp->vha; 1053 difctx = sp->u.scmd.crc_ctx; 1054 direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE; 1055 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021, 1056 "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n", 1057 __func__, cmd, difctx, sp); 1058 } else if (tc) { 1059 vha = tc->vha; 1060 sgl = tc->prot_sg; 1061 difctx = tc->ctx; 1062 direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE; 1063 } else { 1064 BUG(); 1065 return 1; 1066 } 1067 1068 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021, 1069 "%s: enter (write=%u)\n", __func__, direction_to_device); 1070 1071 /* if initiator doing write or target doing read */ 1072 if (direction_to_device) { 1073 for_each_sg(sgl, sg, tot_dsds, i) { 1074 u64 sle_phys = sg_phys(sg); 1075 1076 /* If SGE addr + len flips bits in upper 32-bits */ 1077 if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) { 1078 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022, 1079 "%s: page boundary crossing (phys=%llx len=%x)\n", 1080 __func__, sle_phys, sg->length); 1081 1082 if (difctx) { 1083 ha->dif_bundle_crossed_pages++; 1084 dif_local_dma_alloc = true; 1085 } else { 1086 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, 1087 vha, 0xe022, 1088 "%s: difctx pointer is NULL\n", 1089 __func__); 1090 } 1091 break; 1092 } 1093 } 1094 ha->dif_bundle_writes++; 1095 } else { 1096 ha->dif_bundle_reads++; 1097 } 1098 1099 if (ql2xdifbundlinginternalbuffers) 1100 dif_local_dma_alloc = direction_to_device; 1101 1102 if (dif_local_dma_alloc) { 1103 u32 track_difbundl_buf = 0; 1104 u32 ldma_sg_len = 0; 1105 u8 ldma_needed = 1; 1106 1107 difctx->no_dif_bundl = 0; 1108 difctx->dif_bundl_len = 0; 1109 1110 /* Track DSD buffers */ 1111 INIT_LIST_HEAD(&difctx->ldif_dsd_list); 1112 /* Track local DMA buffers */ 1113 
INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list); 1114 1115 for_each_sg(sgl, sg, tot_dsds, i) { 1116 u32 sglen = sg_dma_len(sg); 1117 1118 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023, 1119 "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n", 1120 __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len, 1121 difctx->dif_bundl_len, ldma_needed); 1122 1123 while (sglen) { 1124 u32 xfrlen = 0; 1125 1126 if (ldma_needed) { 1127 /* 1128 * Allocate list item to store 1129 * the DMA buffers 1130 */ 1131 dsd_ptr = kzalloc(sizeof(*dsd_ptr), 1132 GFP_ATOMIC); 1133 if (!dsd_ptr) { 1134 ql_dbg(ql_dbg_tgt, vha, 0xe024, 1135 "%s: failed alloc dsd_ptr\n", 1136 __func__); 1137 return 1; 1138 } 1139 ha->dif_bundle_kallocs++; 1140 1141 /* allocate dma buffer */ 1142 dsd_ptr->dsd_addr = dma_pool_alloc 1143 (ha->dif_bundl_pool, GFP_ATOMIC, 1144 &dsd_ptr->dsd_list_dma); 1145 if (!dsd_ptr->dsd_addr) { 1146 ql_dbg(ql_dbg_tgt, vha, 0xe024, 1147 "%s: failed alloc ->dsd_ptr\n", 1148 __func__); 1149 /* 1150 * need to cleanup only this 1151 * dsd_ptr rest will be done 1152 * by sp_free_dma() 1153 */ 1154 kfree(dsd_ptr); 1155 ha->dif_bundle_kallocs--; 1156 return 1; 1157 } 1158 ha->dif_bundle_dma_allocs++; 1159 ldma_needed = 0; 1160 difctx->no_dif_bundl++; 1161 list_add_tail(&dsd_ptr->list, 1162 &difctx->ldif_dma_hndl_list); 1163 } 1164 1165 /* xfrlen is min of dma pool size and sglen */ 1166 xfrlen = (sglen > 1167 (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ? 1168 DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len : 1169 sglen; 1170 1171 /* replace with local allocated dma buffer */ 1172 sg_pcopy_to_buffer(sgl, sg_nents(sgl), 1173 dsd_ptr->dsd_addr + ldma_sg_len, xfrlen, 1174 difctx->dif_bundl_len); 1175 difctx->dif_bundl_len += xfrlen; 1176 sglen -= xfrlen; 1177 ldma_sg_len += xfrlen; 1178 if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE || 1179 sg_is_last(sg)) { 1180 ldma_needed = 1; 1181 ldma_sg_len = 0; 1182 } 1183 } 1184 } 1185 1186 track_difbundl_buf = used_dsds = difctx->no_dif_bundl; 1187 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025, 1188 "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n", 1189 difctx->dif_bundl_len, difctx->no_dif_bundl, 1190 track_difbundl_buf); 1191 1192 if (sp) 1193 sp->flags |= SRB_DIF_BUNDL_DMA_VALID; 1194 else 1195 tc->prot_flags = DIF_BUNDL_DMA_VALID; 1196 1197 list_for_each_entry_safe(dif_dsd, nxt_dsd, 1198 &difctx->ldif_dma_hndl_list, list) { 1199 u32 sglen = (difctx->dif_bundl_len > 1200 DIF_BUNDLING_DMA_POOL_SIZE) ? 1201 DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len; 1202 1203 BUG_ON(track_difbundl_buf == 0); 1204 1205 /* Allocate additional continuation packets? */ 1206 if (avail_dsds == 0) { 1207 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 1208 0xe024, 1209 "%s: adding continuation iocb's\n", 1210 __func__); 1211 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 
1212 QLA_DSDS_PER_IOCB : used_dsds; 1213 dsd_list_len = (avail_dsds + 1) * 12; 1214 used_dsds -= avail_dsds; 1215 1216 /* allocate tracking DS */ 1217 dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC); 1218 if (!dsd_ptr) { 1219 ql_dbg(ql_dbg_tgt, vha, 0xe026, 1220 "%s: failed alloc dsd_ptr\n", 1221 __func__); 1222 return 1; 1223 } 1224 ha->dif_bundle_kallocs++; 1225 1226 difctx->no_ldif_dsd++; 1227 /* allocate new list */ 1228 dsd_ptr->dsd_addr = 1229 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, 1230 &dsd_ptr->dsd_list_dma); 1231 if (!dsd_ptr->dsd_addr) { 1232 ql_dbg(ql_dbg_tgt, vha, 0xe026, 1233 "%s: failed alloc ->dsd_addr\n", 1234 __func__); 1235 /* 1236 * need to cleanup only this dsd_ptr 1237 * rest will be done by sp_free_dma() 1238 */ 1239 kfree(dsd_ptr); 1240 ha->dif_bundle_kallocs--; 1241 return 1; 1242 } 1243 ha->dif_bundle_dma_allocs++; 1244 1245 if (sp) { 1246 list_add_tail(&dsd_ptr->list, 1247 &difctx->ldif_dsd_list); 1248 sp->flags |= SRB_CRC_CTX_DSD_VALID; 1249 } else { 1250 list_add_tail(&dsd_ptr->list, 1251 &difctx->ldif_dsd_list); 1252 tc->ctx_dsd_alloced = 1; 1253 } 1254 1255 /* add new list to cmd iocb or last list */ 1256 put_unaligned_le64(dsd_ptr->dsd_list_dma, 1257 &cur_dsd->address); 1258 cur_dsd->length = cpu_to_le32(dsd_list_len); 1259 cur_dsd = dsd_ptr->dsd_addr; 1260 } 1261 put_unaligned_le64(dif_dsd->dsd_list_dma, 1262 &cur_dsd->address); 1263 cur_dsd->length = cpu_to_le32(sglen); 1264 cur_dsd++; 1265 avail_dsds--; 1266 difctx->dif_bundl_len -= sglen; 1267 track_difbundl_buf--; 1268 } 1269 1270 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026, 1271 "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__, 1272 difctx->no_ldif_dsd, difctx->no_dif_bundl); 1273 } else { 1274 for_each_sg(sgl, sg, tot_dsds, i) { 1275 /* Allocate additional continuation packets? */ 1276 if (avail_dsds == 0) { 1277 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 1278 QLA_DSDS_PER_IOCB : used_dsds; 1279 dsd_list_len = (avail_dsds + 1) * 12; 1280 used_dsds -= avail_dsds; 1281 1282 /* allocate tracking DS */ 1283 dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC); 1284 if (!dsd_ptr) { 1285 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, 1286 vha, 0xe027, 1287 "%s: failed alloc dsd_dma...\n", 1288 __func__); 1289 return 1; 1290 } 1291 1292 /* allocate new list */ 1293 dsd_ptr->dsd_addr = 1294 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, 1295 &dsd_ptr->dsd_list_dma); 1296 if (!dsd_ptr->dsd_addr) { 1297 /* need to cleanup only this dsd_ptr */ 1298 /* rest will be done by sp_free_dma() */ 1299 kfree(dsd_ptr); 1300 return 1; 1301 } 1302 1303 if (sp) { 1304 list_add_tail(&dsd_ptr->list, 1305 &difctx->dsd_list); 1306 sp->flags |= SRB_CRC_CTX_DSD_VALID; 1307 } else { 1308 list_add_tail(&dsd_ptr->list, 1309 &difctx->dsd_list); 1310 tc->ctx_dsd_alloced = 1; 1311 } 1312 1313 /* add new list to cmd iocb or last list */ 1314 put_unaligned_le64(dsd_ptr->dsd_list_dma, 1315 &cur_dsd->address); 1316 cur_dsd->length = cpu_to_le32(dsd_list_len); 1317 cur_dsd = dsd_ptr->dsd_addr; 1318 } 1319 append_dsd64(&cur_dsd, sg); 1320 avail_dsds--; 1321 } 1322 } 1323 /* Null termination */ 1324 cur_dsd->address = 0; 1325 cur_dsd->length = 0; 1326 cur_dsd++; 1327 return 0; 1328 } 1329 1330 /** 1331 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command 1332 * Type 6 IOCB types. 
1333 * 1334 * @sp: SRB command to process 1335 * @cmd_pkt: Command type 3 IOCB 1336 * @tot_dsds: Total number of segments to transfer 1337 * @tot_prot_dsds: Total number of segments with protection information 1338 * @fw_prot_opts: Protection options to be passed to firmware 1339 */ 1340 static inline int 1341 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, 1342 uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts) 1343 { 1344 struct dsd64 *cur_dsd; 1345 __be32 *fcp_dl; 1346 scsi_qla_host_t *vha; 1347 struct scsi_cmnd *cmd; 1348 uint32_t total_bytes = 0; 1349 uint32_t data_bytes; 1350 uint32_t dif_bytes; 1351 uint8_t bundling = 1; 1352 uint16_t blk_size; 1353 struct crc_context *crc_ctx_pkt = NULL; 1354 struct qla_hw_data *ha; 1355 uint8_t additional_fcpcdb_len; 1356 uint16_t fcp_cmnd_len; 1357 struct fcp_cmnd *fcp_cmnd; 1358 dma_addr_t crc_ctx_dma; 1359 1360 cmd = GET_CMD_SP(sp); 1361 1362 /* Update entry type to indicate Command Type CRC_2 IOCB */ 1363 put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type); 1364 1365 vha = sp->vha; 1366 ha = vha->hw; 1367 1368 /* No data transfer */ 1369 data_bytes = scsi_bufflen(cmd); 1370 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) { 1371 cmd_pkt->byte_count = cpu_to_le32(0); 1372 return QLA_SUCCESS; 1373 } 1374 1375 cmd_pkt->vp_index = sp->vha->vp_idx; 1376 1377 /* Set transfer direction */ 1378 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 1379 cmd_pkt->control_flags = 1380 cpu_to_le16(CF_WRITE_DATA); 1381 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 1382 cmd_pkt->control_flags = 1383 cpu_to_le16(CF_READ_DATA); 1384 } 1385 1386 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || 1387 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) || 1388 (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) || 1389 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT)) 1390 bundling = 0; 1391 1392 /* Allocate CRC context from global pool */ 1393 crc_ctx_pkt = sp->u.scmd.crc_ctx = 1394 dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma); 1395 1396 if (!crc_ctx_pkt) 1397 goto crc_queuing_error; 1398 1399 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma; 1400 1401 sp->flags |= SRB_CRC_CTX_DMA_VALID; 1402 1403 /* Set handle */ 1404 crc_ctx_pkt->handle = cmd_pkt->handle; 1405 1406 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list); 1407 1408 qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *) 1409 &crc_ctx_pkt->ref_tag, tot_prot_dsds); 1410 1411 put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address); 1412 cmd_pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW); 1413 1414 /* Determine SCSI command length -- align to 4 byte boundary */ 1415 if (cmd->cmd_len > 16) { 1416 additional_fcpcdb_len = cmd->cmd_len - 16; 1417 if ((cmd->cmd_len % 4) != 0) { 1418 /* SCSI cmd > 16 bytes must be multiple of 4 */ 1419 goto crc_queuing_error; 1420 } 1421 fcp_cmnd_len = 12 + cmd->cmd_len + 4; 1422 } else { 1423 additional_fcpcdb_len = 0; 1424 fcp_cmnd_len = 12 + 16 + 4; 1425 } 1426 1427 fcp_cmnd = &crc_ctx_pkt->fcp_cmnd; 1428 1429 fcp_cmnd->additional_cdb_len = additional_fcpcdb_len; 1430 if (cmd->sc_data_direction == DMA_TO_DEVICE) 1431 fcp_cmnd->additional_cdb_len |= 1; 1432 else if (cmd->sc_data_direction == DMA_FROM_DEVICE) 1433 fcp_cmnd->additional_cdb_len |= 2; 1434 1435 int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun); 1436 memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); 1437 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len); 1438 put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF, 1439 
			   &cmd_pkt->fcp_cmnd_dseg_address);
	fcp_cmnd->task_management = 0;
	fcp_cmnd->task_attribute = TSK_SIMPLE;

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	default:
		BUG();
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
			SCSI_PROT_DIF_TYPE2))
			fw_prot_opts |= BIT_10;
		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
		    SCSI_PROT_DIF_TYPE3)
			fw_prot_opts |= BIT_11;
	}

	if (!bundling) {
		cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
		    tot_prot_dsds);
		cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (__be32 *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
		    cur_dsd, tot_dsds, NULL))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
	    (tot_dsds - tot_prot_dsds), NULL))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
		    tot_prot_dsds, NULL))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
1542 */ 1543 int 1544 qla24xx_start_scsi(srb_t *sp) 1545 { 1546 int nseg; 1547 unsigned long flags; 1548 uint32_t *clr_ptr; 1549 uint32_t handle; 1550 struct cmd_type_7 *cmd_pkt; 1551 uint16_t cnt; 1552 uint16_t req_cnt; 1553 uint16_t tot_dsds; 1554 struct req_que *req = NULL; 1555 struct rsp_que *rsp; 1556 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1557 struct scsi_qla_host *vha = sp->vha; 1558 struct qla_hw_data *ha = vha->hw; 1559 1560 if (sp->fcport->edif.enable && (sp->fcport->flags & FCF_FCSP_DEVICE)) 1561 return qla28xx_start_scsi_edif(sp); 1562 1563 /* Setup device pointers. */ 1564 req = vha->req; 1565 rsp = req->rsp; 1566 1567 /* So we know we haven't pci_map'ed anything yet */ 1568 tot_dsds = 0; 1569 1570 /* Send marker if required */ 1571 if (vha->marker_needed != 0) { 1572 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) != 1573 QLA_SUCCESS) 1574 return QLA_FUNCTION_FAILED; 1575 vha->marker_needed = 0; 1576 } 1577 1578 /* Acquire ring specific lock */ 1579 spin_lock_irqsave(&ha->hardware_lock, flags); 1580 1581 handle = qla2xxx_get_next_handle(req); 1582 if (handle == 0) 1583 goto queuing_error; 1584 1585 /* Map the sg table so we have an accurate count of sg entries needed */ 1586 if (scsi_sg_count(cmd)) { 1587 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), 1588 scsi_sg_count(cmd), cmd->sc_data_direction); 1589 if (unlikely(!nseg)) 1590 goto queuing_error; 1591 } else 1592 nseg = 0; 1593 1594 tot_dsds = nseg; 1595 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); 1596 1597 sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH; 1598 sp->iores.exch_cnt = 1; 1599 sp->iores.iocb_cnt = req_cnt; 1600 if (qla_get_fw_resources(sp->qpair, &sp->iores)) 1601 goto queuing_error; 1602 1603 if (req->cnt < (req_cnt + 2)) { 1604 if (IS_SHADOW_REG_CAPABLE(ha)) { 1605 cnt = *req->out_ptr; 1606 } else { 1607 cnt = rd_reg_dword_relaxed(req->req_q_out); 1608 if (qla2x00_check_reg16_for_disconnect(vha, cnt)) 1609 goto queuing_error; 1610 } 1611 1612 if (req->ring_index < cnt) 1613 req->cnt = cnt - req->ring_index; 1614 else 1615 req->cnt = req->length - 1616 (req->ring_index - cnt); 1617 if (req->cnt < (req_cnt + 2)) 1618 goto queuing_error; 1619 } 1620 1621 /* Build command packet. */ 1622 req->current_outstanding_cmd = handle; 1623 req->outstanding_cmds[handle] = sp; 1624 sp->handle = handle; 1625 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 1626 req->cnt -= req_cnt; 1627 1628 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; 1629 cmd_pkt->handle = make_handle(req->id, handle); 1630 1631 /* Zero out remaining portion of packet. */ 1632 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ 1633 clr_ptr = (uint32_t *)cmd_pkt + 2; 1634 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 1635 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 1636 1637 /* Set NPORT-ID and LUN number*/ 1638 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 1639 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 1640 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 1641 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 1642 cmd_pkt->vp_index = sp->vha->vp_idx; 1643 1644 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 1645 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 1646 1647 cmd_pkt->task = TSK_SIMPLE; 1648 1649 /* Load SCSI command packet. 
*/ 1650 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); 1651 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); 1652 1653 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 1654 1655 /* Build IOCB segments */ 1656 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req); 1657 1658 /* Set total data segment count. */ 1659 cmd_pkt->entry_count = (uint8_t)req_cnt; 1660 wmb(); 1661 /* Adjust ring index. */ 1662 req->ring_index++; 1663 if (req->ring_index == req->length) { 1664 req->ring_index = 0; 1665 req->ring_ptr = req->ring; 1666 } else 1667 req->ring_ptr++; 1668 1669 sp->qpair->cmd_cnt++; 1670 sp->flags |= SRB_DMA_VALID; 1671 1672 /* Set chip new ring index. */ 1673 wrt_reg_dword(req->req_q_in, req->ring_index); 1674 1675 /* Manage unprocessed RIO/ZIO commands in response queue. */ 1676 if (vha->flags.process_response_queue && 1677 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 1678 qla24xx_process_response_queue(vha, rsp); 1679 1680 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1681 return QLA_SUCCESS; 1682 1683 queuing_error: 1684 if (tot_dsds) 1685 scsi_dma_unmap(cmd); 1686 1687 qla_put_fw_resources(sp->qpair, &sp->iores); 1688 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1689 1690 return QLA_FUNCTION_FAILED; 1691 } 1692 1693 /** 1694 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP 1695 * @sp: command to send to the ISP 1696 * 1697 * Returns non-zero if a failure occurred, else zero. 1698 */ 1699 int 1700 qla24xx_dif_start_scsi(srb_t *sp) 1701 { 1702 int nseg; 1703 unsigned long flags; 1704 uint32_t *clr_ptr; 1705 uint32_t handle; 1706 uint16_t cnt; 1707 uint16_t req_cnt = 0; 1708 uint16_t tot_dsds; 1709 uint16_t tot_prot_dsds; 1710 uint16_t fw_prot_opts = 0; 1711 struct req_que *req = NULL; 1712 struct rsp_que *rsp = NULL; 1713 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1714 struct scsi_qla_host *vha = sp->vha; 1715 struct qla_hw_data *ha = vha->hw; 1716 struct cmd_type_crc_2 *cmd_pkt; 1717 uint32_t status = 0; 1718 1719 #define QDSS_GOT_Q_SPACE BIT_0 1720 1721 /* Only process protection or >16 cdb in this routine */ 1722 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) { 1723 if (cmd->cmd_len <= 16) 1724 return qla24xx_start_scsi(sp); 1725 } 1726 1727 /* Setup device pointers. 
*/ 1728 req = vha->req; 1729 rsp = req->rsp; 1730 1731 /* So we know we haven't pci_map'ed anything yet */ 1732 tot_dsds = 0; 1733 1734 /* Send marker if required */ 1735 if (vha->marker_needed != 0) { 1736 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) != 1737 QLA_SUCCESS) 1738 return QLA_FUNCTION_FAILED; 1739 vha->marker_needed = 0; 1740 } 1741 1742 /* Acquire ring specific lock */ 1743 spin_lock_irqsave(&ha->hardware_lock, flags); 1744 1745 handle = qla2xxx_get_next_handle(req); 1746 if (handle == 0) 1747 goto queuing_error; 1748 1749 /* Compute number of required data segments */ 1750 /* Map the sg table so we have an accurate count of sg entries needed */ 1751 if (scsi_sg_count(cmd)) { 1752 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), 1753 scsi_sg_count(cmd), cmd->sc_data_direction); 1754 if (unlikely(!nseg)) 1755 goto queuing_error; 1756 else 1757 sp->flags |= SRB_DMA_VALID; 1758 1759 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || 1760 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { 1761 struct qla2_sgx sgx; 1762 uint32_t partial; 1763 1764 memset(&sgx, 0, sizeof(struct qla2_sgx)); 1765 sgx.tot_bytes = scsi_bufflen(cmd); 1766 sgx.cur_sg = scsi_sglist(cmd); 1767 sgx.sp = sp; 1768 1769 nseg = 0; 1770 while (qla24xx_get_one_block_sg( 1771 cmd->device->sector_size, &sgx, &partial)) 1772 nseg++; 1773 } 1774 } else 1775 nseg = 0; 1776 1777 /* number of required data segments */ 1778 tot_dsds = nseg; 1779 1780 /* Compute number of required protection segments */ 1781 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) { 1782 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), 1783 scsi_prot_sg_count(cmd), cmd->sc_data_direction); 1784 if (unlikely(!nseg)) 1785 goto queuing_error; 1786 else 1787 sp->flags |= SRB_CRC_PROT_DMA_VALID; 1788 1789 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || 1790 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { 1791 nseg = scsi_bufflen(cmd) / cmd->device->sector_size; 1792 } 1793 } else { 1794 nseg = 0; 1795 } 1796 1797 req_cnt = 1; 1798 /* Total Data and protection sg segment(s) */ 1799 tot_prot_dsds = nseg; 1800 tot_dsds += nseg; 1801 1802 sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH; 1803 sp->iores.exch_cnt = 1; 1804 sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds); 1805 if (qla_get_fw_resources(sp->qpair, &sp->iores)) 1806 goto queuing_error; 1807 1808 if (req->cnt < (req_cnt + 2)) { 1809 if (IS_SHADOW_REG_CAPABLE(ha)) { 1810 cnt = *req->out_ptr; 1811 } else { 1812 cnt = rd_reg_dword_relaxed(req->req_q_out); 1813 if (qla2x00_check_reg16_for_disconnect(vha, cnt)) 1814 goto queuing_error; 1815 } 1816 if (req->ring_index < cnt) 1817 req->cnt = cnt - req->ring_index; 1818 else 1819 req->cnt = req->length - 1820 (req->ring_index - cnt); 1821 if (req->cnt < (req_cnt + 2)) 1822 goto queuing_error; 1823 } 1824 1825 status |= QDSS_GOT_Q_SPACE; 1826 1827 /* Build header part of command packet (excluding the OPCODE). 
*/ 1828 req->current_outstanding_cmd = handle; 1829 req->outstanding_cmds[handle] = sp; 1830 sp->handle = handle; 1831 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 1832 req->cnt -= req_cnt; 1833 1834 /* Fill-in common area */ 1835 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr; 1836 cmd_pkt->handle = make_handle(req->id, handle); 1837 1838 clr_ptr = (uint32_t *)cmd_pkt + 2; 1839 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 1840 1841 /* Set NPORT-ID and LUN number*/ 1842 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 1843 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 1844 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 1845 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 1846 1847 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 1848 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 1849 1850 /* Total Data and protection segment(s) */ 1851 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 1852 1853 /* Build IOCB segments and adjust for data protection segments */ 1854 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *) 1855 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) != 1856 QLA_SUCCESS) 1857 goto queuing_error; 1858 1859 cmd_pkt->entry_count = (uint8_t)req_cnt; 1860 /* Specify response queue number where completion should happen */ 1861 cmd_pkt->entry_status = (uint8_t) rsp->id; 1862 cmd_pkt->timeout = cpu_to_le16(0); 1863 wmb(); 1864 1865 /* Adjust ring index. */ 1866 req->ring_index++; 1867 if (req->ring_index == req->length) { 1868 req->ring_index = 0; 1869 req->ring_ptr = req->ring; 1870 } else 1871 req->ring_ptr++; 1872 1873 sp->qpair->cmd_cnt++; 1874 /* Set chip new ring index. */ 1875 wrt_reg_dword(req->req_q_in, req->ring_index); 1876 1877 /* Manage unprocessed RIO/ZIO commands in response queue. */ 1878 if (vha->flags.process_response_queue && 1879 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 1880 qla24xx_process_response_queue(vha, rsp); 1881 1882 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1883 1884 return QLA_SUCCESS; 1885 1886 queuing_error: 1887 if (status & QDSS_GOT_Q_SPACE) { 1888 req->outstanding_cmds[handle] = NULL; 1889 req->cnt += req_cnt; 1890 } 1891 /* Cleanup will be performed by the caller (queuecommand) */ 1892 1893 qla_put_fw_resources(sp->qpair, &sp->iores); 1894 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1895 1896 return QLA_FUNCTION_FAILED; 1897 } 1898 1899 /** 1900 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP 1901 * @sp: command to send to the ISP 1902 * 1903 * Returns non-zero if a failure occurred, else zero. 
1904 */ 1905 static int 1906 qla2xxx_start_scsi_mq(srb_t *sp) 1907 { 1908 int nseg; 1909 unsigned long flags; 1910 uint32_t *clr_ptr; 1911 uint32_t handle; 1912 struct cmd_type_7 *cmd_pkt; 1913 uint16_t cnt; 1914 uint16_t req_cnt; 1915 uint16_t tot_dsds; 1916 struct req_que *req = NULL; 1917 struct rsp_que *rsp; 1918 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1919 struct scsi_qla_host *vha = sp->fcport->vha; 1920 struct qla_hw_data *ha = vha->hw; 1921 struct qla_qpair *qpair = sp->qpair; 1922 1923 if (sp->fcport->edif.enable && (sp->fcport->flags & FCF_FCSP_DEVICE)) 1924 return qla28xx_start_scsi_edif(sp); 1925 1926 /* Acquire qpair specific lock */ 1927 spin_lock_irqsave(&qpair->qp_lock, flags); 1928 1929 /* Setup qpair pointers */ 1930 req = qpair->req; 1931 rsp = qpair->rsp; 1932 1933 /* So we know we haven't pci_map'ed anything yet */ 1934 tot_dsds = 0; 1935 1936 /* Send marker if required */ 1937 if (vha->marker_needed != 0) { 1938 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) != 1939 QLA_SUCCESS) { 1940 spin_unlock_irqrestore(&qpair->qp_lock, flags); 1941 return QLA_FUNCTION_FAILED; 1942 } 1943 vha->marker_needed = 0; 1944 } 1945 1946 handle = qla2xxx_get_next_handle(req); 1947 if (handle == 0) 1948 goto queuing_error; 1949 1950 /* Map the sg table so we have an accurate count of sg entries needed */ 1951 if (scsi_sg_count(cmd)) { 1952 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), 1953 scsi_sg_count(cmd), cmd->sc_data_direction); 1954 if (unlikely(!nseg)) 1955 goto queuing_error; 1956 } else 1957 nseg = 0; 1958 1959 tot_dsds = nseg; 1960 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); 1961 1962 sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH; 1963 sp->iores.exch_cnt = 1; 1964 sp->iores.iocb_cnt = req_cnt; 1965 if (qla_get_fw_resources(sp->qpair, &sp->iores)) 1966 goto queuing_error; 1967 1968 if (req->cnt < (req_cnt + 2)) { 1969 if (IS_SHADOW_REG_CAPABLE(ha)) { 1970 cnt = *req->out_ptr; 1971 } else { 1972 cnt = rd_reg_dword_relaxed(req->req_q_out); 1973 if (qla2x00_check_reg16_for_disconnect(vha, cnt)) 1974 goto queuing_error; 1975 } 1976 1977 if (req->ring_index < cnt) 1978 req->cnt = cnt - req->ring_index; 1979 else 1980 req->cnt = req->length - 1981 (req->ring_index - cnt); 1982 if (req->cnt < (req_cnt + 2)) 1983 goto queuing_error; 1984 } 1985 1986 /* Build command packet. */ 1987 req->current_outstanding_cmd = handle; 1988 req->outstanding_cmds[handle] = sp; 1989 sp->handle = handle; 1990 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 1991 req->cnt -= req_cnt; 1992 1993 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; 1994 cmd_pkt->handle = make_handle(req->id, handle); 1995 1996 /* Zero out remaining portion of packet. */ 1997 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ 1998 clr_ptr = (uint32_t *)cmd_pkt + 2; 1999 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 2000 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 2001 2002 /* Set NPORT-ID and LUN number*/ 2003 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2004 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 2005 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 2006 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 2007 cmd_pkt->vp_index = sp->fcport->vha->vp_idx; 2008 2009 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 2010 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 2011 2012 cmd_pkt->task = TSK_SIMPLE; 2013 2014 /* Load SCSI command packet. 
*/ 2015 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); 2016 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); 2017 2018 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 2019 2020 /* Build IOCB segments */ 2021 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req); 2022 2023 /* Set total data segment count. */ 2024 cmd_pkt->entry_count = (uint8_t)req_cnt; 2025 wmb(); 2026 /* Adjust ring index. */ 2027 req->ring_index++; 2028 if (req->ring_index == req->length) { 2029 req->ring_index = 0; 2030 req->ring_ptr = req->ring; 2031 } else 2032 req->ring_ptr++; 2033 2034 sp->qpair->cmd_cnt++; 2035 sp->flags |= SRB_DMA_VALID; 2036 2037 /* Set chip new ring index. */ 2038 wrt_reg_dword(req->req_q_in, req->ring_index); 2039 2040 /* Manage unprocessed RIO/ZIO commands in response queue. */ 2041 if (vha->flags.process_response_queue && 2042 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 2043 qla24xx_process_response_queue(vha, rsp); 2044 2045 spin_unlock_irqrestore(&qpair->qp_lock, flags); 2046 return QLA_SUCCESS; 2047 2048 queuing_error: 2049 if (tot_dsds) 2050 scsi_dma_unmap(cmd); 2051 2052 qla_put_fw_resources(sp->qpair, &sp->iores); 2053 spin_unlock_irqrestore(&qpair->qp_lock, flags); 2054 2055 return QLA_FUNCTION_FAILED; 2056 } 2057 2058 2059 /** 2060 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP 2061 * @sp: command to send to the ISP 2062 * 2063 * Returns non-zero if a failure occurred, else zero. 2064 */ 2065 int 2066 qla2xxx_dif_start_scsi_mq(srb_t *sp) 2067 { 2068 int nseg; 2069 unsigned long flags; 2070 uint32_t *clr_ptr; 2071 uint32_t handle; 2072 uint16_t cnt; 2073 uint16_t req_cnt = 0; 2074 uint16_t tot_dsds; 2075 uint16_t tot_prot_dsds; 2076 uint16_t fw_prot_opts = 0; 2077 struct req_que *req = NULL; 2078 struct rsp_que *rsp = NULL; 2079 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 2080 struct scsi_qla_host *vha = sp->fcport->vha; 2081 struct qla_hw_data *ha = vha->hw; 2082 struct cmd_type_crc_2 *cmd_pkt; 2083 uint32_t status = 0; 2084 struct qla_qpair *qpair = sp->qpair; 2085 2086 #define QDSS_GOT_Q_SPACE BIT_0 2087 2088 /* Check for host side state */ 2089 if (!qpair->online) { 2090 cmd->result = DID_NO_CONNECT << 16; 2091 return QLA_INTERFACE_ERROR; 2092 } 2093 2094 if (!qpair->difdix_supported && 2095 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { 2096 cmd->result = DID_NO_CONNECT << 16; 2097 return QLA_INTERFACE_ERROR; 2098 } 2099 2100 /* Only process protection or >16 cdb in this routine */ 2101 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) { 2102 if (cmd->cmd_len <= 16) 2103 return qla2xxx_start_scsi_mq(sp); 2104 } 2105 2106 spin_lock_irqsave(&qpair->qp_lock, flags); 2107 2108 /* Setup qpair pointers */ 2109 rsp = qpair->rsp; 2110 req = qpair->req; 2111 2112 /* So we know we haven't pci_map'ed anything yet */ 2113 tot_dsds = 0; 2114 2115 /* Send marker if required */ 2116 if (vha->marker_needed != 0) { 2117 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) != 2118 QLA_SUCCESS) { 2119 spin_unlock_irqrestore(&qpair->qp_lock, flags); 2120 return QLA_FUNCTION_FAILED; 2121 } 2122 vha->marker_needed = 0; 2123 } 2124 2125 handle = qla2xxx_get_next_handle(req); 2126 if (handle == 0) 2127 goto queuing_error; 2128 2129 /* Compute number of required data segments */ 2130 /* Map the sg table so we have an accurate count of sg entries needed */ 2131 if (scsi_sg_count(cmd)) { 2132 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), 2133 scsi_sg_count(cmd), cmd->sc_data_direction); 2134 if (unlikely(!nseg)) 2135 goto queuing_error; 2136 else 2137 
sp->flags |= SRB_DMA_VALID; 2138 2139 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || 2140 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { 2141 struct qla2_sgx sgx; 2142 uint32_t partial; 2143 2144 memset(&sgx, 0, sizeof(struct qla2_sgx)); 2145 sgx.tot_bytes = scsi_bufflen(cmd); 2146 sgx.cur_sg = scsi_sglist(cmd); 2147 sgx.sp = sp; 2148 2149 nseg = 0; 2150 while (qla24xx_get_one_block_sg( 2151 cmd->device->sector_size, &sgx, &partial)) 2152 nseg++; 2153 } 2154 } else 2155 nseg = 0; 2156 2157 /* number of required data segments */ 2158 tot_dsds = nseg; 2159 2160 /* Compute number of required protection segments */ 2161 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) { 2162 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), 2163 scsi_prot_sg_count(cmd), cmd->sc_data_direction); 2164 if (unlikely(!nseg)) 2165 goto queuing_error; 2166 else 2167 sp->flags |= SRB_CRC_PROT_DMA_VALID; 2168 2169 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || 2170 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { 2171 nseg = scsi_bufflen(cmd) / cmd->device->sector_size; 2172 } 2173 } else { 2174 nseg = 0; 2175 } 2176 2177 req_cnt = 1; 2178 /* Total Data and protection sg segment(s) */ 2179 tot_prot_dsds = nseg; 2180 tot_dsds += nseg; 2181 2182 sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH; 2183 sp->iores.exch_cnt = 1; 2184 sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds); 2185 if (qla_get_fw_resources(sp->qpair, &sp->iores)) 2186 goto queuing_error; 2187 2188 if (req->cnt < (req_cnt + 2)) { 2189 if (IS_SHADOW_REG_CAPABLE(ha)) { 2190 cnt = *req->out_ptr; 2191 } else { 2192 cnt = rd_reg_dword_relaxed(req->req_q_out); 2193 if (qla2x00_check_reg16_for_disconnect(vha, cnt)) 2194 goto queuing_error; 2195 } 2196 2197 if (req->ring_index < cnt) 2198 req->cnt = cnt - req->ring_index; 2199 else 2200 req->cnt = req->length - 2201 (req->ring_index - cnt); 2202 if (req->cnt < (req_cnt + 2)) 2203 goto queuing_error; 2204 } 2205 2206 status |= QDSS_GOT_Q_SPACE; 2207 2208 /* Build header part of command packet (excluding the OPCODE). */ 2209 req->current_outstanding_cmd = handle; 2210 req->outstanding_cmds[handle] = sp; 2211 sp->handle = handle; 2212 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 2213 req->cnt -= req_cnt; 2214 2215 /* Fill-in common area */ 2216 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr; 2217 cmd_pkt->handle = make_handle(req->id, handle); 2218 2219 clr_ptr = (uint32_t *)cmd_pkt + 2; 2220 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 2221 2222 /* Set NPORT-ID and LUN number*/ 2223 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2224 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 2225 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 2226 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 2227 2228 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 2229 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 2230 2231 /* Total Data and protection segment(s) */ 2232 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 2233 2234 /* Build IOCB segments and adjust for data protection segments */ 2235 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *) 2236 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) != 2237 QLA_SUCCESS) 2238 goto queuing_error; 2239 2240 cmd_pkt->entry_count = (uint8_t)req_cnt; 2241 cmd_pkt->timeout = cpu_to_le16(0); 2242 wmb(); 2243 2244 /* Adjust ring index. 
*/ 2245 req->ring_index++; 2246 if (req->ring_index == req->length) { 2247 req->ring_index = 0; 2248 req->ring_ptr = req->ring; 2249 } else 2250 req->ring_ptr++; 2251 2252 sp->qpair->cmd_cnt++; 2253 /* Set chip new ring index. */ 2254 wrt_reg_dword(req->req_q_in, req->ring_index); 2255 2256 /* Manage unprocessed RIO/ZIO commands in response queue. */ 2257 if (vha->flags.process_response_queue && 2258 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 2259 qla24xx_process_response_queue(vha, rsp); 2260 2261 spin_unlock_irqrestore(&qpair->qp_lock, flags); 2262 2263 return QLA_SUCCESS; 2264 2265 queuing_error: 2266 if (status & QDSS_GOT_Q_SPACE) { 2267 req->outstanding_cmds[handle] = NULL; 2268 req->cnt += req_cnt; 2269 } 2270 /* Cleanup will be performed by the caller (queuecommand) */ 2271 2272 qla_put_fw_resources(sp->qpair, &sp->iores); 2273 spin_unlock_irqrestore(&qpair->qp_lock, flags); 2274 2275 return QLA_FUNCTION_FAILED; 2276 } 2277 2278 /* Generic Control-SRB manipulation functions. */ 2279 2280 /* hardware_lock assumed to be held. */ 2281 2282 void * 2283 __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp) 2284 { 2285 scsi_qla_host_t *vha = qpair->vha; 2286 struct qla_hw_data *ha = vha->hw; 2287 struct req_que *req = qpair->req; 2288 device_reg_t *reg = ISP_QUE_REG(ha, req->id); 2289 uint32_t handle; 2290 request_t *pkt; 2291 uint16_t cnt, req_cnt; 2292 2293 pkt = NULL; 2294 req_cnt = 1; 2295 handle = 0; 2296 2297 if (sp && (sp->type != SRB_SCSI_CMD)) { 2298 /* Adjust entry-counts as needed. */ 2299 req_cnt = sp->iocbs; 2300 } 2301 2302 /* Check for room on request queue. */ 2303 if (req->cnt < req_cnt + 2) { 2304 if (qpair->use_shadow_reg) 2305 cnt = *req->out_ptr; 2306 else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || 2307 IS_QLA28XX(ha)) 2308 cnt = rd_reg_dword(&reg->isp25mq.req_q_out); 2309 else if (IS_P3P_TYPE(ha)) 2310 cnt = rd_reg_dword(reg->isp82.req_q_out); 2311 else if (IS_FWI2_CAPABLE(ha)) 2312 cnt = rd_reg_dword(&reg->isp24.req_q_out); 2313 else if (IS_QLAFX00(ha)) 2314 cnt = rd_reg_dword(&reg->ispfx00.req_q_out); 2315 else 2316 cnt = qla2x00_debounce_register( 2317 ISP_REQ_Q_OUT(ha, &reg->isp)); 2318 2319 if (!qpair->use_shadow_reg && cnt == ISP_REG16_DISCONNECT) { 2320 qla_schedule_eeh_work(vha); 2321 return NULL; 2322 } 2323 2324 if (req->ring_index < cnt) 2325 req->cnt = cnt - req->ring_index; 2326 else 2327 req->cnt = req->length - 2328 (req->ring_index - cnt); 2329 } 2330 if (req->cnt < req_cnt + 2) 2331 goto queuing_error; 2332 2333 if (sp) { 2334 handle = qla2xxx_get_next_handle(req); 2335 if (handle == 0) { 2336 ql_log(ql_log_warn, vha, 0x700b, 2337 "No room on outstanding cmd array.\n"); 2338 goto queuing_error; 2339 } 2340 2341 /* Prep command array.
*/ 2342 req->current_outstanding_cmd = handle; 2343 req->outstanding_cmds[handle] = sp; 2344 sp->handle = handle; 2345 } 2346 2347 /* Prep packet */ 2348 req->cnt -= req_cnt; 2349 pkt = req->ring_ptr; 2350 memset(pkt, 0, REQUEST_ENTRY_SIZE); 2351 if (IS_QLAFX00(ha)) { 2352 wrt_reg_byte((u8 __force __iomem *)&pkt->entry_count, req_cnt); 2353 wrt_reg_dword((__le32 __force __iomem *)&pkt->handle, handle); 2354 } else { 2355 pkt->entry_count = req_cnt; 2356 pkt->handle = handle; 2357 } 2358 2359 return pkt; 2360 2361 queuing_error: 2362 qpair->tgt_counters.num_alloc_iocb_failed++; 2363 return pkt; 2364 } 2365 2366 void * 2367 qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp) 2368 { 2369 scsi_qla_host_t *vha = qpair->vha; 2370 2371 if (qla2x00_reset_active(vha)) 2372 return NULL; 2373 2374 return __qla2x00_alloc_iocbs(qpair, sp); 2375 } 2376 2377 void * 2378 qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp) 2379 { 2380 return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp); 2381 } 2382 2383 static void 2384 qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio) 2385 { 2386 struct srb_iocb *lio = &sp->u.iocb_cmd; 2387 2388 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2389 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI); 2390 if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) { 2391 logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI); 2392 if (sp->vha->flags.nvme_first_burst) 2393 logio->io_parameter[0] = 2394 cpu_to_le32(NVME_PRLI_SP_FIRST_BURST); 2395 if (sp->vha->flags.nvme2_enabled) { 2396 /* Set service parameter BIT_7 for NVME CONF support */ 2397 logio->io_parameter[0] |= 2398 cpu_to_le32(NVME_PRLI_SP_CONF); 2399 /* Set service parameter BIT_8 for SLER support */ 2400 logio->io_parameter[0] |= 2401 cpu_to_le32(NVME_PRLI_SP_SLER); 2402 /* Set service parameter BIT_9 for PI control support */ 2403 logio->io_parameter[0] |= 2404 cpu_to_le32(NVME_PRLI_SP_PI_CTRL); 2405 } 2406 } 2407 2408 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2409 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 2410 logio->port_id[1] = sp->fcport->d_id.b.area; 2411 logio->port_id[2] = sp->fcport->d_id.b.domain; 2412 logio->vp_index = sp->vha->vp_idx; 2413 } 2414 2415 static void 2416 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio) 2417 { 2418 struct srb_iocb *lio = &sp->u.iocb_cmd; 2419 2420 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2421 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); 2422 2423 if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) { 2424 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI); 2425 } else { 2426 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); 2427 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI) 2428 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI); 2429 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI) 2430 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI); 2431 if (lio->u.logio.flags & SRB_LOGIN_FCSP) { 2432 logio->control_flags |= 2433 cpu_to_le16(LCF_COMMON_FEAT | LCF_SKIP_PRLI); 2434 logio->io_parameter[0] = 2435 cpu_to_le32(LIO_COMM_FEAT_FCSP | LIO_COMM_FEAT_CIO); 2436 } 2437 } 2438 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2439 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 2440 logio->port_id[1] = sp->fcport->d_id.b.area; 2441 logio->port_id[2] = sp->fcport->d_id.b.domain; 2442 logio->vp_index = sp->vha->vp_idx; 2443 } 2444 2445 static void 2446 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx) 2447 { 2448 struct qla_hw_data *ha = sp->vha->hw; 2449 struct srb_iocb *lio = &sp->u.iocb_cmd; 2450 uint16_t opts; 2451 
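	/*
	 * Pre-FWI2 adapters carry the fabric login as a mailbox IOCB:
	 * mb0 holds MBC_LOGIN_FABRIC_PORT, and the loop ID and option bits
	 * are split across mb1/mb10, or packed into mb1 alone when the
	 * adapter does not support extended IDs (see the HAS_EXTENDED_IDS()
	 * branch below).
	 */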
2452 mbx->entry_type = MBX_IOCB_TYPE; 2453 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); 2454 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT); 2455 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0; 2456 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0; 2457 if (HAS_EXTENDED_IDS(ha)) { 2458 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id); 2459 mbx->mb10 = cpu_to_le16(opts); 2460 } else { 2461 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts); 2462 } 2463 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); 2464 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | 2465 sp->fcport->d_id.b.al_pa); 2466 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); 2467 } 2468 2469 static void 2470 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio) 2471 { 2472 u16 control_flags = LCF_COMMAND_LOGO; 2473 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2474 2475 if (sp->fcport->explicit_logout) { 2476 control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT; 2477 } else { 2478 control_flags |= LCF_IMPL_LOGO; 2479 2480 if (!sp->fcport->keep_nport_handle) 2481 control_flags |= LCF_FREE_NPORT; 2482 } 2483 2484 logio->control_flags = cpu_to_le16(control_flags); 2485 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2486 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 2487 logio->port_id[1] = sp->fcport->d_id.b.area; 2488 logio->port_id[2] = sp->fcport->d_id.b.domain; 2489 logio->vp_index = sp->vha->vp_idx; 2490 } 2491 2492 static void 2493 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx) 2494 { 2495 struct qla_hw_data *ha = sp->vha->hw; 2496 2497 mbx->entry_type = MBX_IOCB_TYPE; 2498 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); 2499 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT); 2500 mbx->mb1 = HAS_EXTENDED_IDS(ha) ? 2501 cpu_to_le16(sp->fcport->loop_id) : 2502 cpu_to_le16(sp->fcport->loop_id << 8); 2503 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); 2504 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | 2505 sp->fcport->d_id.b.al_pa); 2506 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); 2507 /* Implicit: mbx->mbx10 = 0. 
*/ 2508 } 2509 2510 static void 2511 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio) 2512 { 2513 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2514 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC); 2515 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2516 logio->vp_index = sp->vha->vp_idx; 2517 } 2518 2519 static void 2520 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx) 2521 { 2522 struct qla_hw_data *ha = sp->vha->hw; 2523 2524 mbx->entry_type = MBX_IOCB_TYPE; 2525 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); 2526 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE); 2527 if (HAS_EXTENDED_IDS(ha)) { 2528 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id); 2529 mbx->mb10 = cpu_to_le16(BIT_0); 2530 } else { 2531 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0); 2532 } 2533 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma)); 2534 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma)); 2535 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma))); 2536 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma))); 2537 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); 2538 } 2539 2540 static void 2541 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) 2542 { 2543 uint32_t flags; 2544 uint64_t lun; 2545 struct fc_port *fcport = sp->fcport; 2546 scsi_qla_host_t *vha = fcport->vha; 2547 struct qla_hw_data *ha = vha->hw; 2548 struct srb_iocb *iocb = &sp->u.iocb_cmd; 2549 struct req_que *req = sp->qpair->req; 2550 2551 flags = iocb->u.tmf.flags; 2552 lun = iocb->u.tmf.lun; 2553 2554 tsk->entry_type = TSK_MGMT_IOCB_TYPE; 2555 tsk->entry_count = 1; 2556 tsk->handle = make_handle(req->id, tsk->handle); 2557 tsk->nport_handle = cpu_to_le16(fcport->loop_id); 2558 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 2559 tsk->control_flags = cpu_to_le32(flags); 2560 tsk->port_id[0] = fcport->d_id.b.al_pa; 2561 tsk->port_id[1] = fcport->d_id.b.area; 2562 tsk->port_id[2] = fcport->d_id.b.domain; 2563 tsk->vp_index = fcport->vha->vp_idx; 2564 2565 if (flags & (TCF_LUN_RESET | TCF_ABORT_TASK_SET| 2566 TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) { 2567 int_to_scsilun(lun, &tsk->lun); 2568 host_to_fcp_swap((uint8_t *)&tsk->lun, 2569 sizeof(tsk->lun)); 2570 } 2571 } 2572 2573 static void 2574 qla2x00_async_done(struct srb *sp, int res) 2575 { 2576 if (del_timer(&sp->u.iocb_cmd.timer)) { 2577 /* 2578 * Successfully cancelled the timeout handler 2579 * ref: TMR 2580 */ 2581 if (kref_put(&sp->cmd_kref, qla2x00_sp_release)) 2582 return; 2583 } 2584 sp->async_done(sp, res); 2585 } 2586 2587 void 2588 qla2x00_sp_release(struct kref *kref) 2589 { 2590 struct srb *sp = container_of(kref, struct srb, cmd_kref); 2591 2592 sp->free(sp); 2593 } 2594 2595 void 2596 qla2x00_init_async_sp(srb_t *sp, unsigned long tmo, 2597 void (*done)(struct srb *sp, int res)) 2598 { 2599 timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0); 2600 sp->done = qla2x00_async_done; 2601 sp->async_done = done; 2602 sp->free = qla2x00_sp_free; 2603 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; 2604 sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ; 2605 if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD) 2606 init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp); 2607 sp->start_timer = 1; 2608 } 2609 2610 static void qla2x00_els_dcmd_sp_free(srb_t *sp) 2611 { 2612 struct srb_iocb *elsio = &sp->u.iocb_cmd; 2613 2614 kfree(sp->fcport); 2615 2616 if (elsio->u.els_logo.els_logo_pyld) 2617 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE, 2618 elsio->u.els_logo.els_logo_pyld, 2619 elsio->u.els_logo.els_logo_pyld_dma); 2620 2621 
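	/* Make sure the IOCB timer is no longer pending before the SRB is returned. */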
del_timer(&elsio->timer); 2622 qla2x00_rel_sp(sp); 2623 } 2624 2625 static void 2626 qla2x00_els_dcmd_iocb_timeout(void *data) 2627 { 2628 srb_t *sp = data; 2629 fc_port_t *fcport = sp->fcport; 2630 struct scsi_qla_host *vha = sp->vha; 2631 struct srb_iocb *lio = &sp->u.iocb_cmd; 2632 unsigned long flags = 0; 2633 int res, h; 2634 2635 ql_dbg(ql_dbg_io, vha, 0x3069, 2636 "%s Timeout, hdl=%x, portid=%02x%02x%02x\n", 2637 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, 2638 fcport->d_id.b.al_pa); 2639 2640 /* Abort the exchange */ 2641 res = qla24xx_async_abort_cmd(sp, false); 2642 if (res) { 2643 ql_dbg(ql_dbg_io, vha, 0x3070, 2644 "mbx abort_command failed.\n"); 2645 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); 2646 for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { 2647 if (sp->qpair->req->outstanding_cmds[h] == sp) { 2648 sp->qpair->req->outstanding_cmds[h] = NULL; 2649 break; 2650 } 2651 } 2652 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); 2653 complete(&lio->u.els_logo.comp); 2654 } else { 2655 ql_dbg(ql_dbg_io, vha, 0x3071, 2656 "mbx abort_command success.\n"); 2657 } 2658 } 2659 2660 static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res) 2661 { 2662 fc_port_t *fcport = sp->fcport; 2663 struct srb_iocb *lio = &sp->u.iocb_cmd; 2664 struct scsi_qla_host *vha = sp->vha; 2665 2666 ql_dbg(ql_dbg_io, vha, 0x3072, 2667 "%s hdl=%x, portid=%02x%02x%02x done\n", 2668 sp->name, sp->handle, fcport->d_id.b.domain, 2669 fcport->d_id.b.area, fcport->d_id.b.al_pa); 2670 2671 complete(&lio->u.els_logo.comp); 2672 } 2673 2674 int 2675 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, 2676 port_id_t remote_did) 2677 { 2678 srb_t *sp; 2679 fc_port_t *fcport = NULL; 2680 struct srb_iocb *elsio = NULL; 2681 struct qla_hw_data *ha = vha->hw; 2682 struct els_logo_payload logo_pyld; 2683 int rval = QLA_SUCCESS; 2684 2685 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 2686 if (!fcport) { 2687 ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n"); 2688 return -ENOMEM; 2689 } 2690 2691 /* Alloc SRB structure 2692 * ref: INIT 2693 */ 2694 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 2695 if (!sp) { 2696 kfree(fcport); 2697 ql_log(ql_log_info, vha, 0x70e6, 2698 "SRB allocation failed\n"); 2699 return -ENOMEM; 2700 } 2701 2702 elsio = &sp->u.iocb_cmd; 2703 fcport->loop_id = 0xFFFF; 2704 fcport->d_id.b.domain = remote_did.b.domain; 2705 fcport->d_id.b.area = remote_did.b.area; 2706 fcport->d_id.b.al_pa = remote_did.b.al_pa; 2707 2708 ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n", 2709 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); 2710 2711 sp->type = SRB_ELS_DCMD; 2712 sp->name = "ELS_DCMD"; 2713 sp->fcport = fcport; 2714 qla2x00_init_async_sp(sp, ELS_DCMD_TIMEOUT, 2715 qla2x00_els_dcmd_sp_done); 2716 sp->free = qla2x00_els_dcmd_sp_free; 2717 sp->u.iocb_cmd.timeout = qla2x00_els_dcmd_iocb_timeout; 2718 init_completion(&sp->u.iocb_cmd.u.els_logo.comp); 2719 2720 elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev, 2721 DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma, 2722 GFP_KERNEL); 2723 2724 if (!elsio->u.els_logo.els_logo_pyld) { 2725 /* ref: INIT */ 2726 kref_put(&sp->cmd_kref, qla2x00_sp_release); 2727 return QLA_FUNCTION_FAILED; 2728 } 2729 2730 memset(&logo_pyld, 0, sizeof(struct els_logo_payload)); 2731 2732 elsio->u.els_logo.els_cmd = els_opcode; 2733 logo_pyld.opcode = els_opcode; 2734 logo_pyld.s_id[0] = vha->d_id.b.al_pa; 2735 logo_pyld.s_id[1] = vha->d_id.b.area; 2736 logo_pyld.s_id[2] 
= vha->d_id.b.domain; 2737 host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t)); 2738 memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE); 2739 2740 memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld, 2741 sizeof(struct els_logo_payload)); 2742 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3075, "LOGO buffer:"); 2743 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a, 2744 elsio->u.els_logo.els_logo_pyld, 2745 sizeof(*elsio->u.els_logo.els_logo_pyld)); 2746 2747 rval = qla2x00_start_sp(sp); 2748 if (rval != QLA_SUCCESS) { 2749 /* ref: INIT */ 2750 kref_put(&sp->cmd_kref, qla2x00_sp_release); 2751 return QLA_FUNCTION_FAILED; 2752 } 2753 2754 ql_dbg(ql_dbg_io, vha, 0x3074, 2755 "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n", 2756 sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain, 2757 fcport->d_id.b.area, fcport->d_id.b.al_pa); 2758 2759 wait_for_completion(&elsio->u.els_logo.comp); 2760 2761 /* ref: INIT */ 2762 kref_put(&sp->cmd_kref, qla2x00_sp_release); 2763 return rval; 2764 } 2765 2766 static void 2767 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) 2768 { 2769 scsi_qla_host_t *vha = sp->vha; 2770 struct srb_iocb *elsio = &sp->u.iocb_cmd; 2771 2772 els_iocb->entry_type = ELS_IOCB_TYPE; 2773 els_iocb->entry_count = 1; 2774 els_iocb->sys_define = 0; 2775 els_iocb->entry_status = 0; 2776 els_iocb->handle = sp->handle; 2777 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2778 els_iocb->tx_dsd_count = cpu_to_le16(1); 2779 els_iocb->vp_index = vha->vp_idx; 2780 els_iocb->sof_type = EST_SOFI3; 2781 els_iocb->rx_dsd_count = 0; 2782 els_iocb->opcode = elsio->u.els_logo.els_cmd; 2783 2784 els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa; 2785 els_iocb->d_id[1] = sp->fcport->d_id.b.area; 2786 els_iocb->d_id[2] = sp->fcport->d_id.b.domain; 2787 /* For SID the byte order is different than DID */ 2788 els_iocb->s_id[1] = vha->d_id.b.al_pa; 2789 els_iocb->s_id[2] = vha->d_id.b.area; 2790 els_iocb->s_id[0] = vha->d_id.b.domain; 2791 2792 if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) { 2793 if (vha->hw->flags.edif_enabled) 2794 els_iocb->control_flags = cpu_to_le16(ECF_SEC_LOGIN); 2795 else 2796 els_iocb->control_flags = 0; 2797 els_iocb->tx_byte_count = els_iocb->tx_len = 2798 cpu_to_le32(sizeof(struct els_plogi_payload)); 2799 put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma, 2800 &els_iocb->tx_address); 2801 els_iocb->rx_dsd_count = cpu_to_le16(1); 2802 els_iocb->rx_byte_count = els_iocb->rx_len = 2803 cpu_to_le32(sizeof(struct els_plogi_payload)); 2804 put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma, 2805 &els_iocb->rx_address); 2806 2807 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073, 2808 "PLOGI ELS IOCB:\n"); 2809 ql_dump_buffer(ql_log_info, vha, 0x0109, 2810 (uint8_t *)els_iocb, 2811 sizeof(*els_iocb)); 2812 } else { 2813 els_iocb->tx_byte_count = 2814 cpu_to_le32(sizeof(struct els_logo_payload)); 2815 put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma, 2816 &els_iocb->tx_address); 2817 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload)); 2818 2819 els_iocb->rx_byte_count = 0; 2820 els_iocb->rx_address = 0; 2821 els_iocb->rx_len = 0; 2822 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076, 2823 "LOGO ELS IOCB:"); 2824 ql_dump_buffer(ql_log_info, vha, 0x010b, 2825 els_iocb, 2826 sizeof(*els_iocb)); 2827 } 2828 2829 sp->vha->qla_stats.control_requests++; 2830 } 2831 2832 void 2833 qla2x00_els_dcmd2_iocb_timeout(void *data) 2834 { 2835 srb_t *sp = data; 2836 fc_port_t *fcport = sp->fcport; 2837 struct scsi_qla_host *vha = 
sp->vha; 2838 unsigned long flags = 0; 2839 int res, h; 2840 2841 ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069, 2842 "%s hdl=%x ELS Timeout, %8phC portid=%06x\n", 2843 sp->name, sp->handle, fcport->port_name, fcport->d_id.b24); 2844 2845 /* Abort the exchange */ 2846 res = qla24xx_async_abort_cmd(sp, false); 2847 ql_dbg(ql_dbg_io, vha, 0x3070, 2848 "mbx abort_command %s\n", 2849 (res == QLA_SUCCESS) ? "successful" : "failed"); 2850 if (res) { 2851 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); 2852 for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { 2853 if (sp->qpair->req->outstanding_cmds[h] == sp) { 2854 sp->qpair->req->outstanding_cmds[h] = NULL; 2855 break; 2856 } 2857 } 2858 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); 2859 sp->done(sp, QLA_FUNCTION_TIMEOUT); 2860 } 2861 } 2862 2863 void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi) 2864 { 2865 if (els_plogi->els_plogi_pyld) 2866 dma_free_coherent(&vha->hw->pdev->dev, 2867 els_plogi->tx_size, 2868 els_plogi->els_plogi_pyld, 2869 els_plogi->els_plogi_pyld_dma); 2870 2871 if (els_plogi->els_resp_pyld) 2872 dma_free_coherent(&vha->hw->pdev->dev, 2873 els_plogi->rx_size, 2874 els_plogi->els_resp_pyld, 2875 els_plogi->els_resp_pyld_dma); 2876 } 2877 2878 static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) 2879 { 2880 fc_port_t *fcport = sp->fcport; 2881 struct srb_iocb *lio = &sp->u.iocb_cmd; 2882 struct scsi_qla_host *vha = sp->vha; 2883 struct event_arg ea; 2884 struct qla_work_evt *e; 2885 struct fc_port *conflict_fcport; 2886 port_id_t cid; /* conflict Nport id */ 2887 const __le32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status; 2888 u16 lid; 2889 2890 ql_dbg(ql_dbg_disc, vha, 0x3072, 2891 "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n", 2892 sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name); 2893 2894 fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE); 2895 /* For edif, set logout on delete to ensure any residual key from FW is flushed.*/ 2896 fcport->logout_on_delete = 1; 2897 fcport->chip_reset = vha->hw->base_qpair->chip_reset; 2898 2899 if (sp->flags & SRB_WAKEUP_ON_COMP) 2900 complete(&lio->u.els_plogi.comp); 2901 else { 2902 switch (le32_to_cpu(fw_status[0])) { 2903 case CS_DATA_UNDERRUN: 2904 case CS_COMPLETE: 2905 memset(&ea, 0, sizeof(ea)); 2906 ea.fcport = fcport; 2907 ea.rc = res; 2908 qla_handle_els_plogi_done(vha, &ea); 2909 break; 2910 2911 case CS_IOCB_ERROR: 2912 switch (le32_to_cpu(fw_status[1])) { 2913 case LSC_SCODE_PORTID_USED: 2914 lid = le32_to_cpu(fw_status[2]) & 0xffff; 2915 qlt_find_sess_invalidate_other(vha, 2916 wwn_to_u64(fcport->port_name), 2917 fcport->d_id, lid, &conflict_fcport); 2918 if (conflict_fcport) { 2919 /* 2920 * Another fcport shares the same 2921 * loop_id & nport id; conflict 2922 * fcport needs to finish cleanup 2923 * before this fcport can proceed 2924 * to login. 
2925 */ 2926 conflict_fcport->conflict = fcport; 2927 fcport->login_pause = 1; 2928 ql_dbg(ql_dbg_disc, vha, 0x20ed, 2929 "%s %d %8phC pid %06x inuse with lid %#x.\n", 2930 __func__, __LINE__, 2931 fcport->port_name, 2932 fcport->d_id.b24, lid); 2933 } else { 2934 ql_dbg(ql_dbg_disc, vha, 0x20ed, 2935 "%s %d %8phC pid %06x inuse with lid %#x sched del\n", 2936 __func__, __LINE__, 2937 fcport->port_name, 2938 fcport->d_id.b24, lid); 2939 qla2x00_clear_loop_id(fcport); 2940 set_bit(lid, vha->hw->loop_id_map); 2941 fcport->loop_id = lid; 2942 fcport->keep_nport_handle = 0; 2943 qlt_schedule_sess_for_deletion(fcport); 2944 } 2945 break; 2946 2947 case LSC_SCODE_NPORT_USED: 2948 cid.b.domain = (le32_to_cpu(fw_status[2]) >> 16) 2949 & 0xff; 2950 cid.b.area = (le32_to_cpu(fw_status[2]) >> 8) 2951 & 0xff; 2952 cid.b.al_pa = le32_to_cpu(fw_status[2]) & 0xff; 2953 cid.b.rsvd_1 = 0; 2954 2955 ql_dbg(ql_dbg_disc, vha, 0x20ec, 2956 "%s %d %8phC lid %#x in use with pid %06x post gnl\n", 2957 __func__, __LINE__, fcport->port_name, 2958 fcport->loop_id, cid.b24); 2959 set_bit(fcport->loop_id, 2960 vha->hw->loop_id_map); 2961 fcport->loop_id = FC_NO_LOOP_ID; 2962 qla24xx_post_gnl_work(vha, fcport); 2963 break; 2964 2965 case LSC_SCODE_NOXCB: 2966 vha->hw->exch_starvation++; 2967 if (vha->hw->exch_starvation > 5) { 2968 ql_log(ql_log_warn, vha, 0xd046, 2969 "Exchange starvation. Resetting RISC\n"); 2970 vha->hw->exch_starvation = 0; 2971 set_bit(ISP_ABORT_NEEDED, 2972 &vha->dpc_flags); 2973 qla2xxx_wake_dpc(vha); 2974 break; 2975 } 2976 fallthrough; 2977 default: 2978 ql_dbg(ql_dbg_disc, vha, 0x20eb, 2979 "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n", 2980 __func__, sp->fcport->port_name, 2981 fw_status[0], fw_status[1], fw_status[2]); 2982 2983 fcport->flags &= ~FCF_ASYNC_SENT; 2984 qlt_schedule_sess_for_deletion(fcport); 2985 break; 2986 } 2987 break; 2988 2989 default: 2990 ql_dbg(ql_dbg_disc, vha, 0x20eb, 2991 "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n", 2992 __func__, sp->fcport->port_name, 2993 fw_status[0], fw_status[1], fw_status[2]); 2994 2995 sp->fcport->flags &= ~FCF_ASYNC_SENT; 2996 qlt_schedule_sess_for_deletion(fcport); 2997 break; 2998 } 2999 3000 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP); 3001 if (!e) { 3002 struct srb_iocb *elsio = &sp->u.iocb_cmd; 3003 3004 qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi); 3005 /* ref: INIT */ 3006 kref_put(&sp->cmd_kref, qla2x00_sp_release); 3007 return; 3008 } 3009 e->u.iosb.sp = sp; 3010 qla2x00_post_work(vha, e); 3011 } 3012 } 3013 3014 int 3015 qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, 3016 fc_port_t *fcport, bool wait) 3017 { 3018 srb_t *sp; 3019 struct srb_iocb *elsio = NULL; 3020 struct qla_hw_data *ha = vha->hw; 3021 int rval = QLA_SUCCESS; 3022 void *ptr, *resp_ptr; 3023 3024 /* Alloc SRB structure 3025 * ref: INIT 3026 */ 3027 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 3028 if (!sp) { 3029 ql_log(ql_log_info, vha, 0x70e6, 3030 "SRB allocation failed\n"); 3031 fcport->flags &= ~FCF_ASYNC_ACTIVE; 3032 return -ENOMEM; 3033 } 3034 3035 fcport->flags |= FCF_ASYNC_SENT; 3036 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND); 3037 elsio = &sp->u.iocb_cmd; 3038 ql_dbg(ql_dbg_io, vha, 0x3073, 3039 "%s Enter: PLOGI portid=%06x\n", __func__, fcport->d_id.b24); 3040 3041 if (wait) 3042 sp->flags = SRB_WAKEUP_ON_COMP; 3043 3044 sp->type = SRB_ELS_DCMD; 3045 sp->name = "ELS_DCMD"; 3046 sp->fcport = fcport; 3047 qla2x00_init_async_sp(sp, ELS_DCMD_TIMEOUT + 2, 3048 qla2x00_els_dcmd2_sp_done); 3049 sp->u.iocb_cmd.timeout = 
qla2x00_els_dcmd2_iocb_timeout; 3050 3051 elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE; 3052 3053 ptr = elsio->u.els_plogi.els_plogi_pyld = 3054 dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.tx_size, 3055 &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL); 3056 3057 if (!elsio->u.els_plogi.els_plogi_pyld) { 3058 rval = QLA_FUNCTION_FAILED; 3059 goto out; 3060 } 3061 3062 resp_ptr = elsio->u.els_plogi.els_resp_pyld = 3063 dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.rx_size, 3064 &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL); 3065 3066 if (!elsio->u.els_plogi.els_resp_pyld) { 3067 rval = QLA_FUNCTION_FAILED; 3068 goto out; 3069 } 3070 3071 ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr); 3072 3073 memset(ptr, 0, sizeof(struct els_plogi_payload)); 3074 memset(resp_ptr, 0, sizeof(struct els_plogi_payload)); 3075 memcpy(elsio->u.els_plogi.els_plogi_pyld->data, 3076 &ha->plogi_els_payld.fl_csp, LOGIN_TEMPLATE_SIZE); 3077 3078 elsio->u.els_plogi.els_cmd = els_opcode; 3079 elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode; 3080 3081 if (els_opcode == ELS_DCMD_PLOGI && DBELL_ACTIVE(vha)) { 3082 struct fc_els_flogi *p = ptr; 3083 3084 p->fl_csp.sp_features |= cpu_to_be16(FC_SP_FT_SEC); 3085 } 3086 3087 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n"); 3088 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109, 3089 (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 3090 sizeof(*elsio->u.els_plogi.els_plogi_pyld)); 3091 3092 init_completion(&elsio->u.els_plogi.comp); 3093 rval = qla2x00_start_sp(sp); 3094 if (rval != QLA_SUCCESS) { 3095 rval = QLA_FUNCTION_FAILED; 3096 } else { 3097 ql_dbg(ql_dbg_disc, vha, 0x3074, 3098 "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n", 3099 sp->name, sp->handle, fcport->loop_id, 3100 fcport->d_id.b24, vha->d_id.b24); 3101 } 3102 3103 if (wait) { 3104 wait_for_completion(&elsio->u.els_plogi.comp); 3105 3106 if (elsio->u.els_plogi.comp_status != CS_COMPLETE) 3107 rval = QLA_FUNCTION_FAILED; 3108 } else { 3109 goto done; 3110 } 3111 3112 out: 3113 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); 3114 qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi); 3115 /* ref: INIT */ 3116 kref_put(&sp->cmd_kref, qla2x00_sp_release); 3117 done: 3118 return rval; 3119 } 3120 3121 /* it is assumed the qpair lock is held */ 3122 void qla_els_pt_iocb(struct scsi_qla_host *vha, 3123 struct els_entry_24xx *els_iocb, 3124 struct qla_els_pt_arg *a) 3125 { 3126 els_iocb->entry_type = ELS_IOCB_TYPE; 3127 els_iocb->entry_count = 1; 3128 els_iocb->sys_define = 0; 3129 els_iocb->entry_status = 0; 3130 els_iocb->handle = QLA_SKIP_HANDLE; 3131 els_iocb->nport_handle = a->nport_handle; 3132 els_iocb->rx_xchg_address = a->rx_xchg_address; 3133 els_iocb->tx_dsd_count = cpu_to_le16(1); 3134 els_iocb->vp_index = a->vp_idx; 3135 els_iocb->sof_type = EST_SOFI3; 3136 els_iocb->rx_dsd_count = cpu_to_le16(0); 3137 els_iocb->opcode = a->els_opcode; 3138 3139 els_iocb->d_id[0] = a->did.b.al_pa; 3140 els_iocb->d_id[1] = a->did.b.area; 3141 els_iocb->d_id[2] = a->did.b.domain; 3142 /* For SID the byte order is different than DID */ 3143 els_iocb->s_id[1] = vha->d_id.b.al_pa; 3144 els_iocb->s_id[2] = vha->d_id.b.area; 3145 els_iocb->s_id[0] = vha->d_id.b.domain; 3146 3147 els_iocb->control_flags = cpu_to_le16(a->control_flags); 3148 3149 els_iocb->tx_byte_count = cpu_to_le32(a->tx_byte_count); 3150 els_iocb->tx_len = cpu_to_le32(a->tx_len); 3151 put_unaligned_le64(a->tx_addr, &els_iocb->tx_address); 3152
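	/*
	 * Receive-side DSD: where the firmware should place the ELS
	 * response, as supplied by the caller in qla_els_pt_arg.
	 */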
3153 els_iocb->rx_byte_count = cpu_to_le32(a->rx_byte_count); 3154 els_iocb->rx_len = cpu_to_le32(a->rx_len); 3155 put_unaligned_le64(a->rx_addr, &els_iocb->rx_address); 3156 } 3157 3158 static void 3159 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) 3160 { 3161 struct bsg_job *bsg_job = sp->u.bsg_job; 3162 struct fc_bsg_request *bsg_request = bsg_job->request; 3163 3164 els_iocb->entry_type = ELS_IOCB_TYPE; 3165 els_iocb->entry_count = 1; 3166 els_iocb->sys_define = 0; 3167 els_iocb->entry_status = 0; 3168 els_iocb->handle = sp->handle; 3169 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3170 els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt); 3171 els_iocb->vp_index = sp->vha->vp_idx; 3172 els_iocb->sof_type = EST_SOFI3; 3173 els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt); 3174 3175 els_iocb->opcode = 3176 sp->type == SRB_ELS_CMD_RPT ? 3177 bsg_request->rqst_data.r_els.els_code : 3178 bsg_request->rqst_data.h_els.command_code; 3179 els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa; 3180 els_iocb->d_id[1] = sp->fcport->d_id.b.area; 3181 els_iocb->d_id[2] = sp->fcport->d_id.b.domain; 3182 els_iocb->control_flags = 0; 3183 els_iocb->rx_byte_count = 3184 cpu_to_le32(bsg_job->reply_payload.payload_len); 3185 els_iocb->tx_byte_count = 3186 cpu_to_le32(bsg_job->request_payload.payload_len); 3187 3188 put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list), 3189 &els_iocb->tx_address); 3190 els_iocb->tx_len = cpu_to_le32(sg_dma_len 3191 (bsg_job->request_payload.sg_list)); 3192 3193 put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list), 3194 &els_iocb->rx_address); 3195 els_iocb->rx_len = cpu_to_le32(sg_dma_len 3196 (bsg_job->reply_payload.sg_list)); 3197 3198 sp->vha->qla_stats.control_requests++; 3199 } 3200 3201 static void 3202 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb) 3203 { 3204 uint16_t avail_dsds; 3205 struct dsd64 *cur_dsd; 3206 struct scatterlist *sg; 3207 int index; 3208 uint16_t tot_dsds; 3209 scsi_qla_host_t *vha = sp->vha; 3210 struct qla_hw_data *ha = vha->hw; 3211 struct bsg_job *bsg_job = sp->u.bsg_job; 3212 int entry_count = 1; 3213 3214 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t)); 3215 ct_iocb->entry_type = CT_IOCB_TYPE; 3216 ct_iocb->entry_status = 0; 3217 ct_iocb->handle1 = sp->handle; 3218 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id); 3219 ct_iocb->status = cpu_to_le16(0); 3220 ct_iocb->control_flags = cpu_to_le16(0); 3221 ct_iocb->timeout = 0; 3222 ct_iocb->cmd_dsd_count = 3223 cpu_to_le16(bsg_job->request_payload.sg_cnt); 3224 ct_iocb->total_dsd_count = 3225 cpu_to_le16(bsg_job->request_payload.sg_cnt + 1); 3226 ct_iocb->req_bytecount = 3227 cpu_to_le32(bsg_job->request_payload.payload_len); 3228 ct_iocb->rsp_bytecount = 3229 cpu_to_le32(bsg_job->reply_payload.payload_len); 3230 3231 put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list), 3232 &ct_iocb->req_dsd.address); 3233 ct_iocb->req_dsd.length = ct_iocb->req_bytecount; 3234 3235 put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list), 3236 &ct_iocb->rsp_dsd.address); 3237 ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount; 3238 3239 avail_dsds = 1; 3240 cur_dsd = &ct_iocb->rsp_dsd; 3241 index = 0; 3242 tot_dsds = bsg_job->reply_payload.sg_cnt; 3243 3244 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) { 3245 cont_a64_entry_t *cont_pkt; 3246 3247 /* Allocate additional continuation packets? */ 3248 if (avail_dsds == 0) { 3249 /* 3250 * Five DSDs are available in the Cont. 
3251 * Type 1 IOCB. 3252 */ 3253 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, 3254 vha->hw->req_q_map[0]); 3255 cur_dsd = cont_pkt->dsd; 3256 avail_dsds = 5; 3257 entry_count++; 3258 } 3259 3260 append_dsd64(&cur_dsd, sg); 3261 avail_dsds--; 3262 } 3263 ct_iocb->entry_count = entry_count; 3264 3265 sp->vha->qla_stats.control_requests++; 3266 } 3267 3268 static void 3269 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) 3270 { 3271 uint16_t avail_dsds; 3272 struct dsd64 *cur_dsd; 3273 struct scatterlist *sg; 3274 int index; 3275 uint16_t cmd_dsds, rsp_dsds; 3276 scsi_qla_host_t *vha = sp->vha; 3277 struct qla_hw_data *ha = vha->hw; 3278 struct bsg_job *bsg_job = sp->u.bsg_job; 3279 int entry_count = 1; 3280 cont_a64_entry_t *cont_pkt = NULL; 3281 3282 ct_iocb->entry_type = CT_IOCB_TYPE; 3283 ct_iocb->entry_status = 0; 3284 ct_iocb->sys_define = 0; 3285 ct_iocb->handle = sp->handle; 3286 3287 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3288 ct_iocb->vp_index = sp->vha->vp_idx; 3289 ct_iocb->comp_status = cpu_to_le16(0); 3290 3291 cmd_dsds = bsg_job->request_payload.sg_cnt; 3292 rsp_dsds = bsg_job->reply_payload.sg_cnt; 3293 3294 ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds); 3295 ct_iocb->timeout = 0; 3296 ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds); 3297 ct_iocb->cmd_byte_count = 3298 cpu_to_le32(bsg_job->request_payload.payload_len); 3299 3300 avail_dsds = 2; 3301 cur_dsd = ct_iocb->dsd; 3302 index = 0; 3303 3304 for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) { 3305 /* Allocate additional continuation packets? */ 3306 if (avail_dsds == 0) { 3307 /* 3308 * Five DSDs are available in the Cont. 3309 * Type 1 IOCB. 3310 */ 3311 cont_pkt = qla2x00_prep_cont_type1_iocb( 3312 vha, ha->req_q_map[0]); 3313 cur_dsd = cont_pkt->dsd; 3314 avail_dsds = 5; 3315 entry_count++; 3316 } 3317 3318 append_dsd64(&cur_dsd, sg); 3319 avail_dsds--; 3320 } 3321 3322 index = 0; 3323 3324 for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) { 3325 /* Allocate additional continuation packets? */ 3326 if (avail_dsds == 0) { 3327 /* 3328 * Five DSDs are available in the Cont. 3329 * Type 1 IOCB. 3330 */ 3331 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, 3332 ha->req_q_map[0]); 3333 cur_dsd = cont_pkt->dsd; 3334 avail_dsds = 5; 3335 entry_count++; 3336 } 3337 3338 append_dsd64(&cur_dsd, sg); 3339 avail_dsds--; 3340 } 3341 ct_iocb->entry_count = entry_count; 3342 } 3343 3344 /* 3345 * qla82xx_start_scsi() - Send a SCSI command to the ISP 3346 * @sp: command to send to the ISP 3347 * 3348 * Returns non-zero if a failure occurred, else zero. 3349 */ 3350 int 3351 qla82xx_start_scsi(srb_t *sp) 3352 { 3353 int nseg; 3354 unsigned long flags; 3355 struct scsi_cmnd *cmd; 3356 uint32_t *clr_ptr; 3357 uint32_t handle; 3358 uint16_t cnt; 3359 uint16_t req_cnt; 3360 uint16_t tot_dsds; 3361 struct device_reg_82xx __iomem *reg; 3362 uint32_t dbval; 3363 __be32 *fcp_dl; 3364 uint8_t additional_cdb_len; 3365 struct ct6_dsd *ctx; 3366 struct scsi_qla_host *vha = sp->vha; 3367 struct qla_hw_data *ha = vha->hw; 3368 struct req_que *req = NULL; 3369 struct rsp_que *rsp = NULL; 3370 3371 /* Setup device pointers. 
*/ 3372 reg = &ha->iobase->isp82; 3373 cmd = GET_CMD_SP(sp); 3374 req = vha->req; 3375 rsp = ha->rsp_q_map[0]; 3376 3377 /* So we know we haven't pci_map'ed anything yet */ 3378 tot_dsds = 0; 3379 3380 dbval = 0x04 | (ha->portnum << 5); 3381 3382 /* Send marker if required */ 3383 if (vha->marker_needed != 0) { 3384 if (qla2x00_marker(vha, ha->base_qpair, 3385 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) { 3386 ql_log(ql_log_warn, vha, 0x300c, 3387 "qla2x00_marker failed for cmd=%p.\n", cmd); 3388 return QLA_FUNCTION_FAILED; 3389 } 3390 vha->marker_needed = 0; 3391 } 3392 3393 /* Acquire ring specific lock */ 3394 spin_lock_irqsave(&ha->hardware_lock, flags); 3395 3396 handle = qla2xxx_get_next_handle(req); 3397 if (handle == 0) 3398 goto queuing_error; 3399 3400 /* Map the sg table so we have an accurate count of sg entries needed */ 3401 if (scsi_sg_count(cmd)) { 3402 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), 3403 scsi_sg_count(cmd), cmd->sc_data_direction); 3404 if (unlikely(!nseg)) 3405 goto queuing_error; 3406 } else 3407 nseg = 0; 3408 3409 tot_dsds = nseg; 3410 3411 if (tot_dsds > ql2xshiftctondsd) { 3412 struct cmd_type_6 *cmd_pkt; 3413 uint16_t more_dsd_lists = 0; 3414 struct dsd_dma *dsd_ptr; 3415 uint16_t i; 3416 3417 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds); 3418 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) { 3419 ql_dbg(ql_dbg_io, vha, 0x300d, 3420 "Num of DSD list %d is more than %d for cmd=%p.\n", 3421 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN, 3422 cmd); 3423 goto queuing_error; 3424 } 3425 3426 if (more_dsd_lists <= ha->gbl_dsd_avail) 3427 goto sufficient_dsds; 3428 else 3429 more_dsd_lists -= ha->gbl_dsd_avail; 3430 3431 for (i = 0; i < more_dsd_lists; i++) { 3432 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); 3433 if (!dsd_ptr) { 3434 ql_log(ql_log_fatal, vha, 0x300e, 3435 "Failed to allocate memory for dsd_dma " 3436 "for cmd=%p.\n", cmd); 3437 goto queuing_error; 3438 } 3439 3440 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool, 3441 GFP_ATOMIC, &dsd_ptr->dsd_list_dma); 3442 if (!dsd_ptr->dsd_addr) { 3443 kfree(dsd_ptr); 3444 ql_log(ql_log_fatal, vha, 0x300f, 3445 "Failed to allocate memory for dsd_addr " 3446 "for cmd=%p.\n", cmd); 3447 goto queuing_error; 3448 } 3449 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list); 3450 ha->gbl_dsd_avail++; 3451 } 3452 3453 sufficient_dsds: 3454 req_cnt = 1; 3455 3456 if (req->cnt < (req_cnt + 2)) { 3457 cnt = (uint16_t)rd_reg_dword_relaxed( 3458 &reg->req_q_out[0]); 3459 if (req->ring_index < cnt) 3460 req->cnt = cnt - req->ring_index; 3461 else 3462 req->cnt = req->length - 3463 (req->ring_index - cnt); 3464 if (req->cnt < (req_cnt + 2)) 3465 goto queuing_error; 3466 } 3467 3468 ctx = &sp->u.scmd.ct6_ctx; 3469 3470 memset(ctx, 0, sizeof(struct ct6_dsd)); 3471 ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool, 3472 GFP_ATOMIC, &ctx->fcp_cmnd_dma); 3473 if (!ctx->fcp_cmnd) { 3474 ql_log(ql_log_fatal, vha, 0x3011, 3475 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd); 3476 goto queuing_error; 3477 } 3478 3479 /* Initialize the DSD list and dma handle */ 3480 INIT_LIST_HEAD(&ctx->dsd_list); 3481 ctx->dsd_use_cnt = 0; 3482 3483 if (cmd->cmd_len > 16) { 3484 additional_cdb_len = cmd->cmd_len - 16; 3485 if ((cmd->cmd_len % 4) != 0) { 3486 /* SCSI command bigger than 16 bytes must be 3487 * a multiple of 4 3488 */ 3489 ql_log(ql_log_warn, vha, 0x3012, 3490 "scsi cmd len %d not multiple of 4 " 3491 "for cmd=%p.\n", cmd->cmd_len, cmd); 3492 goto queuing_error_fcp_cmnd; 3493 } 3494 ctx->fcp_cmnd_len = 12 +
cmd->cmd_len + 4; 3495 } else { 3496 additional_cdb_len = 0; 3497 ctx->fcp_cmnd_len = 12 + 16 + 4; 3498 } 3499 3500 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr; 3501 cmd_pkt->handle = make_handle(req->id, handle); 3502 3503 /* Zero out remaining portion of packet. */ 3504 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ 3505 clr_ptr = (uint32_t *)cmd_pkt + 2; 3506 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 3507 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 3508 3509 /* Set NPORT-ID and LUN number*/ 3510 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3511 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 3512 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 3513 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 3514 cmd_pkt->vp_index = sp->vha->vp_idx; 3515 3516 /* Build IOCB segments */ 3517 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds)) 3518 goto queuing_error_fcp_cmnd; 3519 3520 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 3521 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 3522 3523 /* build FCP_CMND IU */ 3524 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun); 3525 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; 3526 3527 if (cmd->sc_data_direction == DMA_TO_DEVICE) 3528 ctx->fcp_cmnd->additional_cdb_len |= 1; 3529 else if (cmd->sc_data_direction == DMA_FROM_DEVICE) 3530 ctx->fcp_cmnd->additional_cdb_len |= 2; 3531 3532 /* Populate the FCP_PRIO. */ 3533 if (ha->flags.fcp_prio_enabled) 3534 ctx->fcp_cmnd->task_attribute |= 3535 sp->fcport->fcp_prio << 3; 3536 3537 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); 3538 3539 fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 + 3540 additional_cdb_len); 3541 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd)); 3542 3543 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len); 3544 put_unaligned_le64(ctx->fcp_cmnd_dma, 3545 &cmd_pkt->fcp_cmnd_dseg_address); 3546 3547 sp->flags |= SRB_FCP_CMND_DMA_VALID; 3548 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 3549 /* Set total data segment count. */ 3550 cmd_pkt->entry_count = (uint8_t)req_cnt; 3551 /* Specify response queue number where 3552 * completion should happen 3553 */ 3554 cmd_pkt->entry_status = (uint8_t) rsp->id; 3555 } else { 3556 struct cmd_type_7 *cmd_pkt; 3557 3558 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); 3559 if (req->cnt < (req_cnt + 2)) { 3560 cnt = (uint16_t)rd_reg_dword_relaxed( 3561 &reg->req_q_out[0]); 3562 if (req->ring_index < cnt) 3563 req->cnt = cnt - req->ring_index; 3564 else 3565 req->cnt = req->length - 3566 (req->ring_index - cnt); 3567 } 3568 if (req->cnt < (req_cnt + 2)) 3569 goto queuing_error; 3570 3571 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; 3572 cmd_pkt->handle = make_handle(req->id, handle); 3573 3574 /* Zero out remaining portion of packet. */ 3575 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/ 3576 clr_ptr = (uint32_t *)cmd_pkt + 2; 3577 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 3578 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 3579 3580 /* Set NPORT-ID and LUN number*/ 3581 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3582 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 3583 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 3584 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 3585 cmd_pkt->vp_index = sp->vha->vp_idx; 3586 3587 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 3588 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, 3589 sizeof(cmd_pkt->lun)); 3590 3591 /* Populate the FCP_PRIO.
*/ 3592 if (ha->flags.fcp_prio_enabled) 3593 cmd_pkt->task |= sp->fcport->fcp_prio << 3; 3594 3595 /* Load SCSI command packet. */ 3596 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); 3597 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); 3598 3599 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 3600 3601 /* Build IOCB segments */ 3602 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req); 3603 3604 /* Set total data segment count. */ 3605 cmd_pkt->entry_count = (uint8_t)req_cnt; 3606 /* Specify response queue number where 3607 * completion should happen. 3608 */ 3609 cmd_pkt->entry_status = (uint8_t) rsp->id; 3610 3611 } 3612 /* Build command packet. */ 3613 req->current_outstanding_cmd = handle; 3614 req->outstanding_cmds[handle] = sp; 3615 sp->handle = handle; 3616 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 3617 req->cnt -= req_cnt; 3618 wmb(); 3619 3620 /* Adjust ring index. */ 3621 req->ring_index++; 3622 if (req->ring_index == req->length) { 3623 req->ring_index = 0; 3624 req->ring_ptr = req->ring; 3625 } else 3626 req->ring_ptr++; 3627 3628 sp->flags |= SRB_DMA_VALID; 3629 3630 /* Set chip new ring index. */ 3631 /* write, read and verify logic */ 3632 dbval = dbval | (req->id << 8) | (req->ring_index << 16); 3633 if (ql2xdbwr) 3634 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval); 3635 else { 3636 wrt_reg_dword(ha->nxdb_wr_ptr, dbval); 3637 wmb(); 3638 while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) { 3639 wrt_reg_dword(ha->nxdb_wr_ptr, dbval); 3640 wmb(); 3641 } 3642 } 3643 3644 /* Manage unprocessed RIO/ZIO commands in response queue. */ 3645 if (vha->flags.process_response_queue && 3646 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 3647 qla24xx_process_response_queue(vha, rsp); 3648 3649 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3650 return QLA_SUCCESS; 3651 3652 queuing_error_fcp_cmnd: 3653 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma); 3654 queuing_error: 3655 if (tot_dsds) 3656 scsi_dma_unmap(cmd); 3657 3658 if (sp->u.scmd.crc_ctx) { 3659 mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool); 3660 sp->u.scmd.crc_ctx = NULL; 3661 } 3662 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3663 3664 return QLA_FUNCTION_FAILED; 3665 } 3666 3667 static void 3668 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb) 3669 { 3670 struct srb_iocb *aio = &sp->u.iocb_cmd; 3671 scsi_qla_host_t *vha = sp->vha; 3672 struct req_que *req = sp->qpair->req; 3673 srb_t *orig_sp = sp->cmd_sp; 3674 3675 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx)); 3676 abt_iocb->entry_type = ABORT_IOCB_TYPE; 3677 abt_iocb->entry_count = 1; 3678 abt_iocb->handle = make_handle(req->id, sp->handle); 3679 if (sp->fcport) { 3680 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3681 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; 3682 abt_iocb->port_id[1] = sp->fcport->d_id.b.area; 3683 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain; 3684 } 3685 abt_iocb->handle_to_abort = 3686 make_handle(le16_to_cpu(aio->u.abt.req_que_no), 3687 aio->u.abt.cmd_hndl); 3688 abt_iocb->vp_index = vha->vp_idx; 3689 abt_iocb->req_que_no = aio->u.abt.req_que_no; 3690 3691 /* need to pass original sp */ 3692 if (orig_sp) 3693 qla_nvme_abort_set_option(abt_iocb, orig_sp); 3694 3695 /* Send the command to the firmware */ 3696 wmb(); 3697 } 3698 3699 static void 3700 qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx) 3701 { 3702 int i, sz; 3703 3704 mbx->entry_type = MBX_IOCB_TYPE; 3705 mbx->handle = sp->handle; 3706 sz 
= min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb)); 3707 3708 for (i = 0; i < sz; i++) 3709 mbx->mb[i] = sp->u.iocb_cmd.u.mbx.out_mb[i]; 3710 } 3711 3712 static void 3713 qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt) 3714 { 3715 sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt; 3716 qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg); 3717 ct_pkt->handle = sp->handle; 3718 } 3719 3720 static void qla2x00_send_notify_ack_iocb(srb_t *sp, 3721 struct nack_to_isp *nack) 3722 { 3723 struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy; 3724 3725 nack->entry_type = NOTIFY_ACK_TYPE; 3726 nack->entry_count = 1; 3727 nack->ox_id = ntfy->ox_id; 3728 3729 nack->u.isp24.handle = sp->handle; 3730 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; 3731 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { 3732 nack->u.isp24.flags = ntfy->u.isp24.flags & 3733 cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB); 3734 } 3735 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; 3736 nack->u.isp24.status = ntfy->u.isp24.status; 3737 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; 3738 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; 3739 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; 3740 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; 3741 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; 3742 nack->u.isp24.srr_flags = 0; 3743 nack->u.isp24.srr_reject_code = 0; 3744 nack->u.isp24.srr_reject_code_expl = 0; 3745 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; 3746 3747 if (ntfy->u.isp24.status_subcode == ELS_PLOGI && 3748 (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP) && 3749 sp->vha->hw->flags.edif_enabled) { 3750 ql_dbg(ql_dbg_disc, sp->vha, 0x3074, 3751 "%s PLOGI NACK sent with FC SECURITY bit, hdl=%x, loopid=%x, to pid %06x\n", 3752 sp->name, sp->handle, sp->fcport->loop_id, 3753 sp->fcport->d_id.b24); 3754 nack->u.isp24.flags |= cpu_to_le16(NOTIFY_ACK_FLAGS_FCSP); 3755 } 3756 } 3757 3758 /* 3759 * Build NVME LS request 3760 */ 3761 static void 3762 qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt) 3763 { 3764 struct srb_iocb *nvme; 3765 3766 nvme = &sp->u.iocb_cmd; 3767 cmd_pkt->entry_type = PT_LS4_REQUEST; 3768 cmd_pkt->entry_count = 1; 3769 cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT); 3770 3771 cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec); 3772 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3773 cmd_pkt->vp_index = sp->fcport->vha->vp_idx; 3774 3775 cmd_pkt->tx_dseg_count = cpu_to_le16(1); 3776 cmd_pkt->tx_byte_count = cpu_to_le32(nvme->u.nvme.cmd_len); 3777 cmd_pkt->dsd[0].length = cpu_to_le32(nvme->u.nvme.cmd_len); 3778 put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address); 3779 3780 cmd_pkt->rx_dseg_count = cpu_to_le16(1); 3781 cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len); 3782 cmd_pkt->dsd[1].length = cpu_to_le32(nvme->u.nvme.rsp_len); 3783 put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address); 3784 } 3785 3786 static void 3787 qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce) 3788 { 3789 int map, pos; 3790 3791 vce->entry_type = VP_CTRL_IOCB_TYPE; 3792 vce->handle = sp->handle; 3793 vce->entry_count = 1; 3794 vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd); 3795 vce->vp_count = cpu_to_le16(1); 3796 3797 /* 3798 * index map in firmware starts with 1; decrement index 3799 * this is ok as we never use index 0 3800 */ 3801 map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8; 3802 pos = 
(sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7; 3803 vce->vp_idx_map[map] |= 1 << pos; 3804 } 3805 3806 static void 3807 qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio) 3808 { 3809 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 3810 logio->control_flags = 3811 cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO); 3812 3813 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3814 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 3815 logio->port_id[1] = sp->fcport->d_id.b.area; 3816 logio->port_id[2] = sp->fcport->d_id.b.domain; 3817 logio->vp_index = sp->fcport->vha->vp_idx; 3818 } 3819 3820 static int qla_get_iocbs_resource(struct srb *sp) 3821 { 3822 bool get_exch; 3823 bool push_it_through = false; 3824 3825 if (!ql2xenforce_iocb_limit) { 3826 sp->iores.res_type = RESOURCE_NONE; 3827 return 0; 3828 } 3829 sp->iores.res_type = RESOURCE_NONE; 3830 3831 switch (sp->type) { 3832 case SRB_TM_CMD: 3833 case SRB_PRLI_CMD: 3834 case SRB_ADISC_CMD: 3835 push_it_through = true; 3836 fallthrough; 3837 case SRB_LOGIN_CMD: 3838 case SRB_ELS_CMD_RPT: 3839 case SRB_ELS_CMD_HST: 3840 case SRB_ELS_CMD_HST_NOLOGIN: 3841 case SRB_CT_CMD: 3842 case SRB_NVME_LS: 3843 case SRB_ELS_DCMD: 3844 get_exch = true; 3845 break; 3846 3847 case SRB_FXIOCB_DCMD: 3848 case SRB_FXIOCB_BCMD: 3849 sp->iores.res_type = RESOURCE_NONE; 3850 return 0; 3851 3852 case SRB_SA_UPDATE: 3853 case SRB_SA_REPLACE: 3854 case SRB_MB_IOCB: 3855 case SRB_ABT_CMD: 3856 case SRB_NACK_PLOGI: 3857 case SRB_NACK_PRLI: 3858 case SRB_NACK_LOGO: 3859 case SRB_LOGOUT_CMD: 3860 case SRB_CTRL_VP: 3861 case SRB_MARKER: 3862 default: 3863 push_it_through = true; 3864 get_exch = false; 3865 } 3866 3867 sp->iores.res_type |= RESOURCE_IOCB; 3868 sp->iores.iocb_cnt = 1; 3869 if (get_exch) { 3870 sp->iores.res_type |= RESOURCE_EXCH; 3871 sp->iores.exch_cnt = 1; 3872 } 3873 if (push_it_through) 3874 sp->iores.res_type |= RESOURCE_FORCE; 3875 3876 return qla_get_fw_resources(sp->qpair, &sp->iores); 3877 } 3878 3879 static void 3880 qla_marker_iocb(srb_t *sp, struct mrk_entry_24xx *mrk) 3881 { 3882 mrk->entry_type = MARKER_TYPE; 3883 mrk->modifier = sp->u.iocb_cmd.u.tmf.modifier; 3884 if (sp->u.iocb_cmd.u.tmf.modifier != MK_SYNC_ALL) { 3885 mrk->nport_handle = cpu_to_le16(sp->u.iocb_cmd.u.tmf.loop_id); 3886 int_to_scsilun(sp->u.iocb_cmd.u.tmf.lun, (struct scsi_lun *)&mrk->lun); 3887 host_to_fcp_swap(mrk->lun, sizeof(mrk->lun)); 3888 mrk->vp_index = sp->u.iocb_cmd.u.tmf.vp_index; 3889 } 3890 } 3891 3892 int 3893 qla2x00_start_sp(srb_t *sp) 3894 { 3895 int rval = QLA_SUCCESS; 3896 scsi_qla_host_t *vha = sp->vha; 3897 struct qla_hw_data *ha = vha->hw; 3898 struct qla_qpair *qp = sp->qpair; 3899 void *pkt; 3900 unsigned long flags; 3901 3902 if (vha->hw->flags.eeh_busy) 3903 return -EIO; 3904 3905 spin_lock_irqsave(qp->qp_lock_ptr, flags); 3906 rval = qla_get_iocbs_resource(sp); 3907 if (rval) { 3908 spin_unlock_irqrestore(qp->qp_lock_ptr, flags); 3909 return -EAGAIN; 3910 } 3911 3912 pkt = __qla2x00_alloc_iocbs(sp->qpair, sp); 3913 if (!pkt) { 3914 rval = EAGAIN; 3915 ql_log(ql_log_warn, vha, 0x700c, 3916 "qla2x00_alloc_iocbs failed.\n"); 3917 goto done; 3918 } 3919 3920 switch (sp->type) { 3921 case SRB_LOGIN_CMD: 3922 IS_FWI2_CAPABLE(ha) ? 3923 qla24xx_login_iocb(sp, pkt) : 3924 qla2x00_login_iocb(sp, pkt); 3925 break; 3926 case SRB_PRLI_CMD: 3927 qla24xx_prli_iocb(sp, pkt); 3928 break; 3929 case SRB_LOGOUT_CMD: 3930 IS_FWI2_CAPABLE(ha) ? 
static void
qla_marker_iocb(srb_t *sp, struct mrk_entry_24xx *mrk)
{
	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = sp->u.iocb_cmd.u.tmf.modifier;
	if (sp->u.iocb_cmd.u.tmf.modifier != MK_SYNC_ALL) {
		mrk->nport_handle = cpu_to_le16(sp->u.iocb_cmd.u.tmf.loop_id);
		int_to_scsilun(sp->u.iocb_cmd.u.tmf.lun, (struct scsi_lun *)&mrk->lun);
		host_to_fcp_swap(mrk->lun, sizeof(mrk->lun));
		mrk->vp_index = sp->u.iocb_cmd.u.tmf.vp_index;
	}
}

int
qla2x00_start_sp(srb_t *sp)
{
	int rval = QLA_SUCCESS;
	scsi_qla_host_t *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qp = sp->qpair;
	void *pkt;
	unsigned long flags;

	if (vha->hw->flags.eeh_busy)
		return -EIO;

	spin_lock_irqsave(qp->qp_lock_ptr, flags);
	rval = qla_get_iocbs_resource(sp);
	if (rval) {
		spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
		return -EAGAIN;
	}

	pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
	if (!pkt) {
		rval = -EAGAIN;
		ql_log(ql_log_warn, vha, 0x700c,
		    "qla2x00_alloc_iocbs failed.\n");
		goto done;
	}

	switch (sp->type) {
	case SRB_LOGIN_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_login_iocb(sp, pkt) :
		    qla2x00_login_iocb(sp, pkt);
		break;
	case SRB_PRLI_CMD:
		qla24xx_prli_iocb(sp, pkt);
		break;
	case SRB_LOGOUT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_logout_iocb(sp, pkt) :
		    qla2x00_logout_iocb(sp, pkt);
		break;
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		qla24xx_els_iocb(sp, pkt);
		break;
	case SRB_ELS_CMD_HST_NOLOGIN:
		qla_els_pt_iocb(sp->vha, pkt, &sp->u.bsg_cmd.u.els_arg);
		((struct els_entry_24xx *)pkt)->handle = sp->handle;
		break;
	case SRB_CT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_ct_iocb(sp, pkt) :
		    qla2x00_ct_iocb(sp, pkt);
		break;
	case SRB_ADISC_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_adisc_iocb(sp, pkt) :
		    qla2x00_adisc_iocb(sp, pkt);
		break;
	case SRB_TM_CMD:
		IS_QLAFX00(ha) ?
		    qlafx00_tm_iocb(sp, pkt) :
		    qla24xx_tm_iocb(sp, pkt);
		break;
	case SRB_FXIOCB_DCMD:
	case SRB_FXIOCB_BCMD:
		qlafx00_fxdisc_iocb(sp, pkt);
		break;
	case SRB_NVME_LS:
		qla_nvme_ls(sp, pkt);
		break;
	case SRB_ABT_CMD:
		IS_QLAFX00(ha) ?
		    qlafx00_abort_iocb(sp, pkt) :
		    qla24xx_abort_iocb(sp, pkt);
		break;
	case SRB_ELS_DCMD:
		qla24xx_els_logo_iocb(sp, pkt);
		break;
	case SRB_CT_PTHRU_CMD:
		qla2x00_ctpthru_cmd_iocb(sp, pkt);
		break;
	case SRB_MB_IOCB:
		qla2x00_mb_iocb(sp, pkt);
		break;
	case SRB_NACK_PLOGI:
	case SRB_NACK_PRLI:
	case SRB_NACK_LOGO:
		qla2x00_send_notify_ack_iocb(sp, pkt);
		break;
	case SRB_CTRL_VP:
		qla25xx_ctrlvp_iocb(sp, pkt);
		break;
	case SRB_PRLO_CMD:
		qla24xx_prlo_iocb(sp, pkt);
		break;
	case SRB_SA_UPDATE:
		qla24xx_sa_update_iocb(sp, pkt);
		break;
	case SRB_SA_REPLACE:
		qla24xx_sa_replace_iocb(sp, pkt);
		break;
	case SRB_MARKER:
		qla_marker_iocb(sp, pkt);
		break;
	default:
		break;
	}

	if (sp->start_timer) {
		/* ref: TMR timer ref
		 * This code should be placed just before the start_iocbs
		 * call.  Taking the reference here ensures the caller does
		 * not have to do a kref_put even on failure.
		 */
		kref_get(&sp->cmd_kref);
		add_timer(&sp->u.iocb_cmd.timer);
	}

	wmb();
	qla2x00_start_iocbs(vha, qp->req);
done:
	if (rval)
		qla_put_fw_resources(sp->qpair, &sp->iores);
	spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
	return rval;
}

static void
qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
    struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
{
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	uint32_t req_data_len = 0;
	uint32_t rsp_data_len = 0;
	struct scatterlist *sg;
	int index;
	int entry_count = 1;
	struct bsg_job *bsg_job = sp->u.bsg_job;

	/* Update entry type to indicate bidir command */
	put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type);

	/* Set the transfer direction; in this case set both flags.
	 * Also set the BD_WRAP_BACK flag: firmware will take care of
	 * assigning DID=SID for outgoing pkts.
	 */
	cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
	cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
	    BD_WRAP_BACK);

	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
	cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
	cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);

	vha->bidi_stats.transfer_bytes += req_data_len;
	vha->bidi_stats.io_count++;

	vha->qla_stats.output_bytes += req_data_len;
	vha->qla_stats.output_requests++;

	/* Only one DSD is available in the bidirectional IOCB; the remaining
	 * DSDs are bundled in continuation IOCBs.
	 */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->fcp_dsd;

	index = 0;

	for_each_sg(bsg_job->request_payload.sg_list, sg,
	    bsg_job->request_payload.sg_cnt, index) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* A Continuation Type 1 IOCB can accommodate
			 * 5 DSDs.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = 5;
			entry_count++;
		}
		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}
	/* For the read request the DSDs always go to continuation IOCBs and
	 * follow the write DSDs.  If there is room on the current IOCB they
	 * are added to that IOCB, otherwise a new continuation IOCB is
	 * allocated.
	 */
	for_each_sg(bsg_job->reply_payload.sg_list, sg,
	    bsg_job->reply_payload.sg_cnt, index) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* A Continuation Type 1 IOCB can accommodate
			 * 5 DSDs.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = 5;
			entry_count++;
		}
		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}
	/* This value should be the same as the number of IOCBs required for
	 * this cmd.
	 */
	cmd_pkt->entry_count = entry_count;
}
int
qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	uint32_t handle;
	uint16_t req_cnt;
	uint16_t cnt;
	uint32_t *clr_ptr;
	struct cmd_bidir *cmd_pkt = NULL;
	struct rsp_que *rsp;
	struct req_que *req;
	int rval = EXT_STATUS_OK;

	rval = QLA_SUCCESS;

	rsp = ha->rsp_q_map[0];
	req = vha->req;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, ha->base_qpair,
		    0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
			return EXT_STATUS_MAILBOX;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}

	/* Calculate number of IOCBs required */
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

	/* Check for room on request queue. */
	if (req->cnt < req_cnt + 2) {
		if (IS_SHADOW_REG_CAPABLE(ha)) {
			cnt = *req->out_ptr;
		} else {
			cnt = rd_reg_dword_relaxed(req->req_q_out);
			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
				goto queuing_error;
		}

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt + 2) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}

	cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID (of vha) */
	cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
	cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
	cmd_pkt->port_id[1] = vha->d_id.b.area;
	cmd_pkt->port_id[2] = vha->d_id.b.domain;

	qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	/* Send the command to the firmware */
	wmb();
	qla2x00_start_iocbs(vha, req);
queuing_error:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return rval;
}
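
/*
 * Illustrative note (editorial, not driver code): the free-slot computation
 * above is the standard circular-ring arithmetic between the driver's
 * producer index (ring_index) and the firmware's consumer index (cnt).
 * A standalone sketch of the same calculation:
 *
 *	// Entries still free in a ring of length slots, given the driver's
 *	// in (producer) index and the hardware's out (consumer) index.
 *	static unsigned int ring_free(unsigned int in, unsigned int out,
 *				      unsigned int length)
 *	{
 *		if (in < out)
 *			return out - in;
 *		return length - (in - out);
 *	}
 *
 * For example, with length = 128, in = 120 and out = 10 the ring still has
 * 128 - (120 - 10) = 18 free entries; the request is queued only if this is
 * at least req_cnt + 2.
 */
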