// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
	    CONTINUE_A64_TYPE, &cont_pkt->entry_type);

	return (cont_pkt);
}

inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t guard = scsi_host_get_guard(cmd->device->host);

	/* We always use DIFF Bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (guard & SHOST_DIX_GUARD_IP)
			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
		else
			*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(cmd);
}

/*
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	struct dsd32 *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
	cur_dsd = cmd_pkt->dsd32;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd32(&cur_dsd, sg);
		avail_dsds--;
	}
}
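/*
 * Illustrative example (editor's note, not part of the driver logic):
 * with the 32-bit layout above, a request carrying 17 data segments
 * needs one Command Type 2 IOCB (3 DSDs) plus two Continuation Type 0
 * IOCBs (7 DSDs each), which matches qla2x00_calc_iocbs_32(17) == 3.
 */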
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64);
	cur_dsd = cmd_pkt->dsd64;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}
}

/*
 * Find the first handle that is not in use, starting from
 * req->current_outstanding_cmd + 1. The caller must hold the lock that is
 * associated with @req.
 */
uint32_t qla2xxx_get_next_handle(struct req_que *req)
{
	uint32_t index, handle = req->current_outstanding_cmd;

	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			return handle;
	}

	return 0;
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	vha = sp->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = rd_reg_word_relaxed(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		/* If still no head room then bail out */
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
	cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	wrt_reg_word(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);

	if (IS_P3P_TYPE(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			wrt_reg_dword(req->req_q_in, req->ring_index);
		} else if (IS_QLA83XX(ha)) {
			wrt_reg_dword(req->req_q_in, req->ring_index);
			rd_reg_dword_relaxed(&ha->iobase->isp24.hccr);
		} else if (IS_QLAFX00(ha)) {
			wrt_reg_dword(&reg->ispfx00.req_q_in, req->ring_index);
			rd_reg_dword_relaxed(&reg->ispfx00.req_q_in);
			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
		} else if (IS_FWI2_CAPABLE(ha)) {
			wrt_reg_dword(&reg->isp24.req_q_in, req->ring_index);
			rd_reg_dword_relaxed(&reg->isp24.req_q_in);
		} else {
			wrt_reg_word(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}
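/*
 * Illustrative note (assumed example values, not taken from the firmware
 * spec): the free-slot check in qla2x00_start_scsi() above treats the
 * request ring as circular.  With req->length = 2048, req->ring_index = 100
 * and a hardware out-pointer of 90, ring_index >= out, so the driver
 * recomputes req->cnt = 2048 - (100 - 90) = 2038 entries of head room and
 * only queues the command if at least req_cnt + 2 slots remain.
 */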
/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @qpair: queue pair pointer
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24 = NULL;
	struct req_que *req = qpair->req;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *)mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = make_handle(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16((uint16_t)lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
	if (ha_locked) {
		if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	} else {
		if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	}
	vha->marker_needed = 0;

	return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
    uint16_t tot_dsds)
{
	struct dsd64 *cur_dsd = NULL, *next_dsd;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;
	struct qla_qpair *qpair = sp->qpair;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return 0;
	}

	vha = sp->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		qpair->counters.output_bytes += scsi_bufflen(cmd);
		qpair->counters.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		qpair->counters.input_bytes += scsi_bufflen(cmd);
		qpair->counters.input_requests++;
	}

	cur_seg = scsi_sglist(cmd);
	ctx = sp->u.scmd.ct6_ctx;

	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			first_iocb = 0;
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
			    &cmd_pkt->fcp_dsd.address);
			cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
		} else {
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
			    &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(dsd_list_len);
			cur_dsd++;
		}
		cur_dsd = next_dsd;
		while (avail_dsds) {
			append_dsd64(&cur_dsd, cur_seg);
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
	return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds / QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}


/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds, struct req_que *req)
{
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct qla_qpair *qpair = sp->qpair;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
		qpair->counters.output_bytes += scsi_bufflen(cmd);
		qpair->counters.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
		qpair->counters.input_bytes += scsi_bufflen(cmd);
		qpair->counters.input_requests++;
	}

	/* One DSD is available in the Command Type 3 IOCB */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->dsd;

	/* Load data segments */

	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}
}

struct fw_dif_context {
	__le32 ref_tag;
	__le16 app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask */
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags - Extract Ref and App tags from SCSI command
 *
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
			0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}
}

int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
    uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}

int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
    struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	struct dsd64 *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	uint32_t prot_int; /* protection interval */
	uint32_t partial;
	struct qla2_sgx sgx;
	dma_addr_t sle_dma;
	uint32_t sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	if (sp) {
		cmd = GET_CMD_SP(sp);
		prot_int = cmd->device->sector_size;

		sgx.tot_bytes = scsi_bufflen(cmd);
		sgx.cur_sg = scsi_sglist(cmd);
		sgx.sp = sp;

		sg_prot = scsi_prot_sglist(cmd);
	} else if (tc) {
		prot_int = tc->blk_sz;
		sgx.tot_bytes = tc->bufflen;
		sgx.cur_sg = tc->sg;
		sg_prot = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &sp->u.scmd.crc_ctx->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}


			/* add new list to cmd iocb or last list */
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
			    &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(dsd_list_len);
			cur_dsd = next_dsd;
		}
		put_unaligned_le64(sle_dma, &cur_dsd->address);
		cur_dsd->length = cpu_to_le32(sle_dma_len);
		cur_dsd++;
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	return 0;
}

int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
    struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	struct dsd64 *cur_dsd = dsd;
	int i;
	uint16_t used_dsds = tot_dsds;
	struct scsi_cmnd *cmd;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_sglist(cmd);
	} else if (tc) {
		sgl = tc->sg;
	} else {
		BUG();
		return 1;
	}


	for_each_sg(sgl, sg, tot_dsds, i) {
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &sp->u.scmd.crc_ctx->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
			    &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(dsd_list_len);
			cur_dsd = next_dsd;
		}
		append_dsd64(&cur_dsd, sg);
		avail_dsds--;

	}
	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	return 0;
}

int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
    struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
	struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
	struct scatterlist *sg, *sgl;
	struct crc_context *difctx = NULL;
	struct scsi_qla_host *vha;
	uint dsd_list_len;
	uint avail_dsds = 0;
	uint used_dsds = tot_dsds;
	bool dif_local_dma_alloc = false;
	bool direction_to_device = false;
	int i;

	if (sp) {
		struct scsi_cmnd *cmd = GET_CMD_SP(sp);

		sgl = scsi_prot_sglist(cmd);
		vha = sp->vha;
		difctx = sp->u.scmd.crc_ctx;
		direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
		    "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
		    __func__, cmd, difctx, sp);
	} else if (tc) {
		vha = tc->vha;
		sgl = tc->prot_sg;
		difctx = tc->ctx;
		direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
	} else {
		BUG();
		return 1;
	}

	ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
	    "%s: enter (write=%u)\n", __func__, direction_to_device);

	/* if initiator doing write or target doing read */
	if (direction_to_device) {
		for_each_sg(sgl, sg, tot_dsds, i) {
			u64 sle_phys = sg_phys(sg);

			/* If SGE addr + len flips bits in upper 32-bits */
			if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
				    "%s: page boundary crossing (phys=%llx len=%x)\n",
				    __func__, sle_phys, sg->length);

				if (difctx) {
					ha->dif_bundle_crossed_pages++;
					dif_local_dma_alloc = true;
				} else {
					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
					    vha, 0xe022,
					    "%s: difctx pointer is NULL\n",
					    __func__);
				}
				break;
			}
		}
		ha->dif_bundle_writes++;
	} else {
		ha->dif_bundle_reads++;
	}

	if (ql2xdifbundlinginternalbuffers)
		dif_local_dma_alloc = direction_to_device;

	if (dif_local_dma_alloc) {
		u32 track_difbundl_buf = 0;
		u32 ldma_sg_len = 0;
		u8 ldma_needed = 1;

		difctx->no_dif_bundl = 0;
		difctx->dif_bundl_len = 0;

		/* Track DSD buffers */
		INIT_LIST_HEAD(&difctx->ldif_dsd_list);
		/* Track local DMA buffers */
		INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);

		for_each_sg(sgl, sg, tot_dsds, i) {
			u32 sglen = sg_dma_len(sg);

			ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
			    "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
			    __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len,
			    difctx->dif_bundl_len, ldma_needed);

			while (sglen) {
				u32 xfrlen = 0;

				if (ldma_needed) {
					/*
					 * Allocate list item to store
					 * the DMA buffers
					 */
					dsd_ptr = kzalloc(sizeof(*dsd_ptr),
					    GFP_ATOMIC);
					if (!dsd_ptr) {
						ql_dbg(ql_dbg_tgt, vha, 0xe024,
						    "%s: failed alloc dsd_ptr\n",
						    __func__);
						return 1;
					}
					ha->dif_bundle_kallocs++;

					/* allocate dma buffer */
					dsd_ptr->dsd_addr = dma_pool_alloc
						(ha->dif_bundl_pool, GFP_ATOMIC,
						 &dsd_ptr->dsd_list_dma);
					if (!dsd_ptr->dsd_addr) {
						ql_dbg(ql_dbg_tgt, vha, 0xe024,
						    "%s: failed alloc ->dsd_ptr\n",
						    __func__);
						/*
						 * need to cleanup only this
						 * dsd_ptr rest will be done
						 * by sp_free_dma()
						 */
						kfree(dsd_ptr);
						ha->dif_bundle_kallocs--;
						return 1;
					}
					ha->dif_bundle_dma_allocs++;
					ldma_needed = 0;
					difctx->no_dif_bundl++;
					list_add_tail(&dsd_ptr->list,
					    &difctx->ldif_dma_hndl_list);
				}

				/* xfrlen is min of dma pool size and sglen */
				xfrlen = (sglen >
				    (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
				    DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
				    sglen;

				/* replace with local allocated dma buffer */
				sg_pcopy_to_buffer(sgl, sg_nents(sgl),
				    dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
				    difctx->dif_bundl_len);
				difctx->dif_bundl_len += xfrlen;
				sglen -= xfrlen;
				ldma_sg_len += xfrlen;
				if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
				    sg_is_last(sg)) {
					ldma_needed = 1;
					ldma_sg_len = 0;
				}
			}
		}

		track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
		    "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
		    difctx->dif_bundl_len, difctx->no_dif_bundl,
		    track_difbundl_buf);

		if (sp)
			sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
		else
			tc->prot_flags = DIF_BUNDL_DMA_VALID;

		list_for_each_entry_safe(dif_dsd, nxt_dsd,
		    &difctx->ldif_dma_hndl_list, list) {
			u32 sglen = (difctx->dif_bundl_len >
				     DIF_BUNDLING_DMA_POOL_SIZE) ?
			    DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;

			BUG_ON(track_difbundl_buf == 0);

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
				    0xe024,
				    "%s: adding continuation iocb's\n",
				    __func__);
				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
				    QLA_DSDS_PER_IOCB : used_dsds;
				dsd_list_len = (avail_dsds + 1) * 12;
				used_dsds -= avail_dsds;

				/* allocate tracking DS */
				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
				if (!dsd_ptr) {
					ql_dbg(ql_dbg_tgt, vha, 0xe026,
					    "%s: failed alloc dsd_ptr\n",
					    __func__);
					return 1;
				}
				ha->dif_bundle_kallocs++;

				difctx->no_ldif_dsd++;
				/* allocate new list */
				dsd_ptr->dsd_addr =
				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
					&dsd_ptr->dsd_list_dma);
				if (!dsd_ptr->dsd_addr) {
					ql_dbg(ql_dbg_tgt, vha, 0xe026,
					    "%s: failed alloc ->dsd_addr\n",
					    __func__);
					/*
					 * need to cleanup only this dsd_ptr
					 * rest will be done by sp_free_dma()
					 */
					kfree(dsd_ptr);
					ha->dif_bundle_kallocs--;
					return 1;
				}
				ha->dif_bundle_dma_allocs++;

				if (sp) {
					list_add_tail(&dsd_ptr->list,
					    &difctx->ldif_dsd_list);
					sp->flags |= SRB_CRC_CTX_DSD_VALID;
				} else {
					list_add_tail(&dsd_ptr->list,
					    &difctx->ldif_dsd_list);
					tc->ctx_dsd_alloced = 1;
				}

				/* add new list to cmd iocb or last list */
				put_unaligned_le64(dsd_ptr->dsd_list_dma,
				    &cur_dsd->address);
				cur_dsd->length = cpu_to_le32(dsd_list_len);
				cur_dsd = dsd_ptr->dsd_addr;
			}
			put_unaligned_le64(dif_dsd->dsd_list_dma,
			    &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(sglen);
			cur_dsd++;
			avail_dsds--;
			difctx->dif_bundl_len -= sglen;
			track_difbundl_buf--;
		}

		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
		    "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
		    difctx->no_ldif_dsd, difctx->no_dif_bundl);
	} else {
		for_each_sg(sgl, sg, tot_dsds, i) {
			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
				    QLA_DSDS_PER_IOCB : used_dsds;
				dsd_list_len = (avail_dsds + 1) * 12;
				used_dsds -= avail_dsds;

				/* allocate tracking DS */
				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
				if (!dsd_ptr) {
					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
					    vha, 0xe027,
					    "%s: failed alloc dsd_dma...\n",
					    __func__);
					return 1;
				}

				/* allocate new list */
				dsd_ptr->dsd_addr =
				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
					&dsd_ptr->dsd_list_dma);
				if (!dsd_ptr->dsd_addr) {
					/* need to cleanup only this dsd_ptr */
					/* rest will be done by sp_free_dma() */
					kfree(dsd_ptr);
					return 1;
				}

				if (sp) {
					list_add_tail(&dsd_ptr->list,
					    &difctx->dsd_list);
					sp->flags |= SRB_CRC_CTX_DSD_VALID;
				} else {
					list_add_tail(&dsd_ptr->list,
					    &difctx->dsd_list);
					tc->ctx_dsd_alloced = 1;
				}

				/* add new list to cmd iocb or last list */
				put_unaligned_le64(dsd_ptr->dsd_list_dma,
				    &cur_dsd->address);
				cur_dsd->length = cpu_to_le32(dsd_list_len);
				cur_dsd = dsd_ptr->dsd_addr;
			}
			append_dsd64(&cur_dsd, sg);
			avail_dsds--;
		}
	}
	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of segments with protection information
 * @fw_prot_opts: Protection options to be passed to firmware
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	struct dsd64 *cur_dsd;
	__be32 *fcp_dl;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t total_bytes = 0;
	uint32_t data_bytes;
	uint32_t dif_bytes;
	uint8_t bundling = 1;
	uint16_t blk_size;
	struct crc_context *crc_ctx_pkt = NULL;
	struct qla_hw_data *ha;
	uint8_t additional_fcpcdb_len;
	uint16_t fcp_cmnd_len;
	struct fcp_cmnd *fcp_cmnd;
	dma_addr_t crc_ctx_dma;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type);

	vha = sp->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->vha->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->u.scmd.crc_ctx =
	    dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
	cmd_pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF,
	    &cmd_pkt->fcp_cmnd_dseg_address);
	fcp_cmnd->task_management = 0;
	fcp_cmnd->task_attribute = TSK_SIMPLE;

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	default:
		BUG();
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
			SCSI_PROT_DIF_TYPE2))
			fw_prot_opts |= BIT_10;
		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
		    SCSI_PROT_DIF_TYPE3)
			fw_prot_opts |= BIT_11;
	}

	if (!bundling) {
		cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
		    tot_prot_dsds);
		cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (__be32 *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
			cur_dsd, tot_dsds, NULL))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
	    (tot_dsds - tot_prot_dsds), NULL))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
		    tot_prot_dsds, NULL))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

	sp->iores.res_type = RESOURCE_INI;
	sp->iores.iocb_cnt = req_cnt;
	if (qla_get_iocbs(sp->qpair, &sp->iores))
		goto queuing_error;

	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    rd_reg_dword_relaxed(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	cmd_pkt->task = TSK_SIMPLE;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	wrt_reg_dword(req->req_q_in, req->ring_index);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	qla_put_iocbs(sp->qpair, &sp->iores);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt = 0;
	uint16_t tot_dsds;
	uint16_t tot_prot_dsds;
	uint16_t fw_prot_opts = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct cmd_type_crc_2 *cmd_pkt;
	uint32_t status = 0;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla24xx_start_scsi(sp);
	}

	/* Setup device pointers. */
	req = vha->req;
	rsp = req->rsp;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;

	sp->iores.res_type = RESOURCE_INI;
	sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (qla_get_iocbs(sp->qpair, &sp->iores))
		goto queuing_error;

	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    rd_reg_dword_relaxed(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
	    QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	cmd_pkt->timeout = cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	wrt_reg_dword(req->req_q_in, req->ring_index);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	qla_put_iocbs(sp->qpair, &sp->iores);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_FUNCTION_FAILED;
}

/**
 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
qla2xxx_start_scsi_mq(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Setup qpair pointers */
	req = qpair->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			spin_unlock_irqrestore(&qpair->qp_lock, flags);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

	sp->iores.res_type = RESOURCE_INI;
	sp->iores.iocb_cnt = req_cnt;
	if (qla_get_iocbs(sp->qpair, &sp->iores))
		goto queuing_error;

	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    rd_reg_dword_relaxed(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	cmd_pkt->task = TSK_SIMPLE;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();
	/* Adjust ring index. */
*/ 2032 req->ring_index++; 2033 if (req->ring_index == req->length) { 2034 req->ring_index = 0; 2035 req->ring_ptr = req->ring; 2036 } else 2037 req->ring_ptr++; 2038 2039 sp->flags |= SRB_DMA_VALID; 2040 2041 /* Set chip new ring index. */ 2042 wrt_reg_dword(req->req_q_in, req->ring_index); 2043 2044 spin_unlock_irqrestore(&qpair->qp_lock, flags); 2045 return QLA_SUCCESS; 2046 2047 queuing_error: 2048 if (tot_dsds) 2049 scsi_dma_unmap(cmd); 2050 2051 qla_put_iocbs(sp->qpair, &sp->iores); 2052 spin_unlock_irqrestore(&qpair->qp_lock, flags); 2053 2054 return QLA_FUNCTION_FAILED; 2055 } 2056 2057 2058 /** 2059 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP 2060 * @sp: command to send to the ISP 2061 * 2062 * Returns non-zero if a failure occurred, else zero. 2063 */ 2064 int 2065 qla2xxx_dif_start_scsi_mq(srb_t *sp) 2066 { 2067 int nseg; 2068 unsigned long flags; 2069 uint32_t *clr_ptr; 2070 uint32_t handle; 2071 uint16_t cnt; 2072 uint16_t req_cnt = 0; 2073 uint16_t tot_dsds; 2074 uint16_t tot_prot_dsds; 2075 uint16_t fw_prot_opts = 0; 2076 struct req_que *req = NULL; 2077 struct rsp_que *rsp = NULL; 2078 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 2079 struct scsi_qla_host *vha = sp->fcport->vha; 2080 struct qla_hw_data *ha = vha->hw; 2081 struct cmd_type_crc_2 *cmd_pkt; 2082 uint32_t status = 0; 2083 struct qla_qpair *qpair = sp->qpair; 2084 2085 #define QDSS_GOT_Q_SPACE BIT_0 2086 2087 /* Check for host side state */ 2088 if (!qpair->online) { 2089 cmd->result = DID_NO_CONNECT << 16; 2090 return QLA_INTERFACE_ERROR; 2091 } 2092 2093 if (!qpair->difdix_supported && 2094 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { 2095 cmd->result = DID_NO_CONNECT << 16; 2096 return QLA_INTERFACE_ERROR; 2097 } 2098 2099 /* Only process protection or >16 cdb in this routine */ 2100 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) { 2101 if (cmd->cmd_len <= 16) 2102 return qla2xxx_start_scsi_mq(sp); 2103 } 2104 2105 spin_lock_irqsave(&qpair->qp_lock, flags); 2106 2107 /* Setup qpair pointers */ 2108 rsp = qpair->rsp; 2109 req = qpair->req; 2110 2111 /* So we know we haven't pci_map'ed anything yet */ 2112 tot_dsds = 0; 2113 2114 /* Send marker if required */ 2115 if (vha->marker_needed != 0) { 2116 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) != 2117 QLA_SUCCESS) { 2118 spin_unlock_irqrestore(&qpair->qp_lock, flags); 2119 return QLA_FUNCTION_FAILED; 2120 } 2121 vha->marker_needed = 0; 2122 } 2123 2124 handle = qla2xxx_get_next_handle(req); 2125 if (handle == 0) 2126 goto queuing_error; 2127 2128 /* Compute number of required data segments */ 2129 /* Map the sg table so we have an accurate count of sg entries needed */ 2130 if (scsi_sg_count(cmd)) { 2131 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), 2132 scsi_sg_count(cmd), cmd->sc_data_direction); 2133 if (unlikely(!nseg)) 2134 goto queuing_error; 2135 else 2136 sp->flags |= SRB_DMA_VALID; 2137 2138 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || 2139 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { 2140 struct qla2_sgx sgx; 2141 uint32_t partial; 2142 2143 memset(&sgx, 0, sizeof(struct qla2_sgx)); 2144 sgx.tot_bytes = scsi_bufflen(cmd); 2145 sgx.cur_sg = scsi_sglist(cmd); 2146 sgx.sp = sp; 2147 2148 nseg = 0; 2149 while (qla24xx_get_one_block_sg( 2150 cmd->device->sector_size, &sgx, &partial)) 2151 nseg++; 2152 } 2153 } else 2154 nseg = 0; 2155 2156 /* number of required data segments */ 2157 tot_dsds = nseg; 2158 2159 /* Compute number of required protection segments */ 2160 if (qla24xx_configure_prot_mode(sp, 
&fw_prot_opts)) { 2161 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), 2162 scsi_prot_sg_count(cmd), cmd->sc_data_direction); 2163 if (unlikely(!nseg)) 2164 goto queuing_error; 2165 else 2166 sp->flags |= SRB_CRC_PROT_DMA_VALID; 2167 2168 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || 2169 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { 2170 nseg = scsi_bufflen(cmd) / cmd->device->sector_size; 2171 } 2172 } else { 2173 nseg = 0; 2174 } 2175 2176 req_cnt = 1; 2177 /* Total Data and protection sg segment(s) */ 2178 tot_prot_dsds = nseg; 2179 tot_dsds += nseg; 2180 2181 sp->iores.res_type = RESOURCE_INI; 2182 sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds); 2183 if (qla_get_iocbs(sp->qpair, &sp->iores)) 2184 goto queuing_error; 2185 2186 if (req->cnt < (req_cnt + 2)) { 2187 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : 2188 rd_reg_dword_relaxed(req->req_q_out); 2189 if (req->ring_index < cnt) 2190 req->cnt = cnt - req->ring_index; 2191 else 2192 req->cnt = req->length - 2193 (req->ring_index - cnt); 2194 if (req->cnt < (req_cnt + 2)) 2195 goto queuing_error; 2196 } 2197 2198 status |= QDSS_GOT_Q_SPACE; 2199 2200 /* Build header part of command packet (excluding the OPCODE). */ 2201 req->current_outstanding_cmd = handle; 2202 req->outstanding_cmds[handle] = sp; 2203 sp->handle = handle; 2204 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 2205 req->cnt -= req_cnt; 2206 2207 /* Fill-in common area */ 2208 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr; 2209 cmd_pkt->handle = make_handle(req->id, handle); 2210 2211 clr_ptr = (uint32_t *)cmd_pkt + 2; 2212 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 2213 2214 /* Set NPORT-ID and LUN number*/ 2215 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2216 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 2217 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 2218 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 2219 2220 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 2221 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 2222 2223 /* Total Data and protection segment(s) */ 2224 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 2225 2226 /* Build IOCB segments and adjust for data protection segments */ 2227 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *) 2228 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) != 2229 QLA_SUCCESS) 2230 goto queuing_error; 2231 2232 cmd_pkt->entry_count = (uint8_t)req_cnt; 2233 cmd_pkt->timeout = cpu_to_le16(0); 2234 wmb(); 2235 2236 /* Adjust ring index. */ 2237 req->ring_index++; 2238 if (req->ring_index == req->length) { 2239 req->ring_index = 0; 2240 req->ring_ptr = req->ring; 2241 } else 2242 req->ring_ptr++; 2243 2244 /* Set chip new ring index. */ 2245 wrt_reg_dword(req->req_q_in, req->ring_index); 2246 2247 /* Manage unprocessed RIO/ZIO commands in response queue. */ 2248 if (vha->flags.process_response_queue && 2249 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 2250 qla24xx_process_response_queue(vha, rsp); 2251 2252 spin_unlock_irqrestore(&qpair->qp_lock, flags); 2253 2254 return QLA_SUCCESS; 2255 2256 queuing_error: 2257 if (status & QDSS_GOT_Q_SPACE) { 2258 req->outstanding_cmds[handle] = NULL; 2259 req->cnt += req_cnt; 2260 } 2261 /* Cleanup will be performed by the caller (queuecommand) */ 2262 2263 qla_put_iocbs(sp->qpair, &sp->iores); 2264 spin_unlock_irqrestore(&qpair->qp_lock, flags); 2265 return QLA_FUNCTION_FAILED; 2266 } 2267 2268 /* Generic Control-SRB manipulation functions. 
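 *
 * The helpers below reserve a request-queue entry via __qla2x00_alloc_iocbs()
 * and format it for non-SCSI SRBs (login/logout, ELS, CT pass-through, task
 * management, aborts, notify-acks) dispatched from qla2x00_start_sp().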
*/ 2269
2270 /* hardware_lock assumed to be held. */
2271
2272 void *
2273 __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
2274 {
2275 scsi_qla_host_t *vha = qpair->vha;
2276 struct qla_hw_data *ha = vha->hw;
2277 struct req_que *req = qpair->req;
2278 device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2279 uint32_t handle;
2280 request_t *pkt;
2281 uint16_t cnt, req_cnt;
2282
2283 pkt = NULL;
2284 req_cnt = 1;
2285 handle = 0;
2286
2287 if (sp && (sp->type != SRB_SCSI_CMD)) {
2288 /* Adjust entry-counts as needed. */
2289 req_cnt = sp->iocbs;
2290 }
2291
2292 /* Check for room on request queue. */
2293 if (req->cnt < req_cnt + 2) {
2294 if (qpair->use_shadow_reg)
2295 cnt = *req->out_ptr;
2296 else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
2297 IS_QLA28XX(ha))
2298 cnt = rd_reg_dword(&reg->isp25mq.req_q_out);
2299 else if (IS_P3P_TYPE(ha))
2300 cnt = rd_reg_dword(reg->isp82.req_q_out);
2301 else if (IS_FWI2_CAPABLE(ha))
2302 cnt = rd_reg_dword(&reg->isp24.req_q_out);
2303 else if (IS_QLAFX00(ha))
2304 cnt = rd_reg_dword(&reg->ispfx00.req_q_out);
2305 else
2306 cnt = qla2x00_debounce_register(
2307 ISP_REQ_Q_OUT(ha, &reg->isp));
2308
2309 if (req->ring_index < cnt)
2310 req->cnt = cnt - req->ring_index;
2311 else
2312 req->cnt = req->length -
2313 (req->ring_index - cnt);
2314 }
2315 if (req->cnt < req_cnt + 2)
2316 goto queuing_error;
2317
2318 if (sp) {
2319 handle = qla2xxx_get_next_handle(req);
2320 if (handle == 0) {
2321 ql_log(ql_log_warn, vha, 0x700b,
2322 "No room on outstanding cmd array.\n");
2323 goto queuing_error;
2324 }
2325
2326 /* Prep command array. */
2327 req->current_outstanding_cmd = handle;
2328 req->outstanding_cmds[handle] = sp;
2329 sp->handle = handle;
2330 }
2331
2332 /* Prep packet */
2333 req->cnt -= req_cnt;
2334 pkt = req->ring_ptr;
2335 memset(pkt, 0, REQUEST_ENTRY_SIZE);
2336 if (IS_QLAFX00(ha)) {
2337 wrt_reg_byte((u8 __force __iomem *)&pkt->entry_count, req_cnt);
2338 wrt_reg_dword((__le32 __force __iomem *)&pkt->handle, handle);
2339 } else {
2340 pkt->entry_count = req_cnt;
2341 pkt->handle = handle;
2342 }
2343
2344 return pkt;
2345
2346 queuing_error:
2347 qpair->tgt_counters.num_alloc_iocb_failed++;
2348 return pkt;
2349 }
2350
2351 void *
2352 qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2353 {
2354 scsi_qla_host_t *vha = qpair->vha;
2355
2356 if (qla2x00_reset_active(vha))
2357 return NULL;
2358
2359 return __qla2x00_alloc_iocbs(qpair, sp);
2360 }
2361
2362 void *
2363 qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2364 {
2365 return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
2366 }
2367
2368 static void
2369 qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2370 {
2371 struct srb_iocb *lio = &sp->u.iocb_cmd;
2372
2373 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2374 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2375 if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
2376 logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI);
2377 if (sp->vha->flags.nvme_first_burst)
2378 logio->io_parameter[0] =
2379 cpu_to_le32(NVME_PRLI_SP_FIRST_BURST);
2380 if (sp->vha->flags.nvme2_enabled) {
2381 /* Set service parameter BIT_7 for NVME CONF support */
2382 logio->io_parameter[0] |= cpu_to_le32(NVME_PRLI_SP_CONF);
2383 /* Set service parameter BIT_8 for SLER support */
2384 logio->io_parameter[0] |=
2385 cpu_to_le32(NVME_PRLI_SP_SLER);
2386 /* Set service parameter BIT_9 for PI control support */
2387 logio->io_parameter[0] |=
2388 cpu_to_le32(NVME_PRLI_SP_PI_CTRL);
2389 }
2390 }
2391
2392 logio->nport_handle =
cpu_to_le16(sp->fcport->loop_id); 2393 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 2394 logio->port_id[1] = sp->fcport->d_id.b.area; 2395 logio->port_id[2] = sp->fcport->d_id.b.domain; 2396 logio->vp_index = sp->vha->vp_idx; 2397 } 2398 2399 static void 2400 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio) 2401 { 2402 struct srb_iocb *lio = &sp->u.iocb_cmd; 2403 2404 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2405 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); 2406 2407 if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) { 2408 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI); 2409 } else { 2410 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); 2411 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI) 2412 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI); 2413 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI) 2414 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI); 2415 } 2416 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2417 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 2418 logio->port_id[1] = sp->fcport->d_id.b.area; 2419 logio->port_id[2] = sp->fcport->d_id.b.domain; 2420 logio->vp_index = sp->vha->vp_idx; 2421 } 2422 2423 static void 2424 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx) 2425 { 2426 struct qla_hw_data *ha = sp->vha->hw; 2427 struct srb_iocb *lio = &sp->u.iocb_cmd; 2428 uint16_t opts; 2429 2430 mbx->entry_type = MBX_IOCB_TYPE; 2431 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); 2432 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT); 2433 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0; 2434 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0; 2435 if (HAS_EXTENDED_IDS(ha)) { 2436 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id); 2437 mbx->mb10 = cpu_to_le16(opts); 2438 } else { 2439 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts); 2440 } 2441 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); 2442 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | 2443 sp->fcport->d_id.b.al_pa); 2444 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); 2445 } 2446 2447 static void 2448 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio) 2449 { 2450 u16 control_flags = LCF_COMMAND_LOGO; 2451 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2452 2453 if (sp->fcport->explicit_logout) { 2454 control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT; 2455 } else { 2456 control_flags |= LCF_IMPL_LOGO; 2457 2458 if (!sp->fcport->keep_nport_handle) 2459 control_flags |= LCF_FREE_NPORT; 2460 } 2461 2462 logio->control_flags = cpu_to_le16(control_flags); 2463 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2464 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 2465 logio->port_id[1] = sp->fcport->d_id.b.area; 2466 logio->port_id[2] = sp->fcport->d_id.b.domain; 2467 logio->vp_index = sp->vha->vp_idx; 2468 } 2469 2470 static void 2471 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx) 2472 { 2473 struct qla_hw_data *ha = sp->vha->hw; 2474 2475 mbx->entry_type = MBX_IOCB_TYPE; 2476 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); 2477 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT); 2478 mbx->mb1 = HAS_EXTENDED_IDS(ha) ? 2479 cpu_to_le16(sp->fcport->loop_id) : 2480 cpu_to_le16(sp->fcport->loop_id << 8); 2481 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); 2482 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | 2483 sp->fcport->d_id.b.al_pa); 2484 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); 2485 /* Implicit: mbx->mbx10 = 0. 
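 * The IOCB was zeroed when it was allocated, so no logout options are
 * passed in mb10.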
*/ 2486 } 2487 2488 static void 2489 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio) 2490 { 2491 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2492 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC); 2493 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2494 logio->vp_index = sp->vha->vp_idx; 2495 } 2496 2497 static void 2498 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx) 2499 { 2500 struct qla_hw_data *ha = sp->vha->hw; 2501 2502 mbx->entry_type = MBX_IOCB_TYPE; 2503 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); 2504 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE); 2505 if (HAS_EXTENDED_IDS(ha)) { 2506 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id); 2507 mbx->mb10 = cpu_to_le16(BIT_0); 2508 } else { 2509 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0); 2510 } 2511 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma)); 2512 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma)); 2513 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma))); 2514 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma))); 2515 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); 2516 } 2517 2518 static void 2519 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) 2520 { 2521 uint32_t flags; 2522 uint64_t lun; 2523 struct fc_port *fcport = sp->fcport; 2524 scsi_qla_host_t *vha = fcport->vha; 2525 struct qla_hw_data *ha = vha->hw; 2526 struct srb_iocb *iocb = &sp->u.iocb_cmd; 2527 struct req_que *req = vha->req; 2528 2529 flags = iocb->u.tmf.flags; 2530 lun = iocb->u.tmf.lun; 2531 2532 tsk->entry_type = TSK_MGMT_IOCB_TYPE; 2533 tsk->entry_count = 1; 2534 tsk->handle = make_handle(req->id, tsk->handle); 2535 tsk->nport_handle = cpu_to_le16(fcport->loop_id); 2536 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 2537 tsk->control_flags = cpu_to_le32(flags); 2538 tsk->port_id[0] = fcport->d_id.b.al_pa; 2539 tsk->port_id[1] = fcport->d_id.b.area; 2540 tsk->port_id[2] = fcport->d_id.b.domain; 2541 tsk->vp_index = fcport->vha->vp_idx; 2542 2543 if (flags == TCF_LUN_RESET) { 2544 int_to_scsilun(lun, &tsk->lun); 2545 host_to_fcp_swap((uint8_t *)&tsk->lun, 2546 sizeof(tsk->lun)); 2547 } 2548 } 2549 2550 void qla2x00_init_timer(srb_t *sp, unsigned long tmo) 2551 { 2552 timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0); 2553 sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ; 2554 sp->free = qla2x00_sp_free; 2555 if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD) 2556 init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp); 2557 sp->start_timer = 1; 2558 } 2559 2560 static void qla2x00_els_dcmd_sp_free(srb_t *sp) 2561 { 2562 struct srb_iocb *elsio = &sp->u.iocb_cmd; 2563 2564 kfree(sp->fcport); 2565 2566 if (elsio->u.els_logo.els_logo_pyld) 2567 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE, 2568 elsio->u.els_logo.els_logo_pyld, 2569 elsio->u.els_logo.els_logo_pyld_dma); 2570 2571 del_timer(&elsio->timer); 2572 qla2x00_rel_sp(sp); 2573 } 2574 2575 static void 2576 qla2x00_els_dcmd_iocb_timeout(void *data) 2577 { 2578 srb_t *sp = data; 2579 fc_port_t *fcport = sp->fcport; 2580 struct scsi_qla_host *vha = sp->vha; 2581 struct srb_iocb *lio = &sp->u.iocb_cmd; 2582 unsigned long flags = 0; 2583 int res, h; 2584 2585 ql_dbg(ql_dbg_io, vha, 0x3069, 2586 "%s Timeout, hdl=%x, portid=%02x%02x%02x\n", 2587 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, 2588 fcport->d_id.b.al_pa); 2589 2590 /* Abort the exchange */ 2591 res = qla24xx_async_abort_cmd(sp, false); 2592 if (res) { 2593 ql_dbg(ql_dbg_io, vha, 0x3070, 2594 "mbx abort_command failed.\n"); 2595 
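/*
 * The firmware could not abort the exchange; reap the handle from the
 * outstanding command array ourselves and wake up the waiter.
 */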
spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); 2596 for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { 2597 if (sp->qpair->req->outstanding_cmds[h] == sp) { 2598 sp->qpair->req->outstanding_cmds[h] = NULL; 2599 break; 2600 } 2601 } 2602 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); 2603 complete(&lio->u.els_logo.comp); 2604 } else { 2605 ql_dbg(ql_dbg_io, vha, 0x3071, 2606 "mbx abort_command success.\n"); 2607 } 2608 } 2609 2610 static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res) 2611 { 2612 fc_port_t *fcport = sp->fcport; 2613 struct srb_iocb *lio = &sp->u.iocb_cmd; 2614 struct scsi_qla_host *vha = sp->vha; 2615 2616 ql_dbg(ql_dbg_io, vha, 0x3072, 2617 "%s hdl=%x, portid=%02x%02x%02x done\n", 2618 sp->name, sp->handle, fcport->d_id.b.domain, 2619 fcport->d_id.b.area, fcport->d_id.b.al_pa); 2620 2621 complete(&lio->u.els_logo.comp); 2622 } 2623 2624 int 2625 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, 2626 port_id_t remote_did) 2627 { 2628 srb_t *sp; 2629 fc_port_t *fcport = NULL; 2630 struct srb_iocb *elsio = NULL; 2631 struct qla_hw_data *ha = vha->hw; 2632 struct els_logo_payload logo_pyld; 2633 int rval = QLA_SUCCESS; 2634 2635 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 2636 if (!fcport) { 2637 ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n"); 2638 return -ENOMEM; 2639 } 2640 2641 /* Alloc SRB structure */ 2642 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 2643 if (!sp) { 2644 kfree(fcport); 2645 ql_log(ql_log_info, vha, 0x70e6, 2646 "SRB allocation failed\n"); 2647 return -ENOMEM; 2648 } 2649 2650 elsio = &sp->u.iocb_cmd; 2651 fcport->loop_id = 0xFFFF; 2652 fcport->d_id.b.domain = remote_did.b.domain; 2653 fcport->d_id.b.area = remote_did.b.area; 2654 fcport->d_id.b.al_pa = remote_did.b.al_pa; 2655 2656 ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n", 2657 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); 2658 2659 sp->type = SRB_ELS_DCMD; 2660 sp->name = "ELS_DCMD"; 2661 sp->fcport = fcport; 2662 elsio->timeout = qla2x00_els_dcmd_iocb_timeout; 2663 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT); 2664 init_completion(&sp->u.iocb_cmd.u.els_logo.comp); 2665 sp->done = qla2x00_els_dcmd_sp_done; 2666 sp->free = qla2x00_els_dcmd_sp_free; 2667 2668 elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev, 2669 DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma, 2670 GFP_KERNEL); 2671 2672 if (!elsio->u.els_logo.els_logo_pyld) { 2673 sp->free(sp); 2674 return QLA_FUNCTION_FAILED; 2675 } 2676 2677 memset(&logo_pyld, 0, sizeof(struct els_logo_payload)); 2678 2679 elsio->u.els_logo.els_cmd = els_opcode; 2680 logo_pyld.opcode = els_opcode; 2681 logo_pyld.s_id[0] = vha->d_id.b.al_pa; 2682 logo_pyld.s_id[1] = vha->d_id.b.area; 2683 logo_pyld.s_id[2] = vha->d_id.b.domain; 2684 host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t)); 2685 memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE); 2686 2687 memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld, 2688 sizeof(struct els_logo_payload)); 2689 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3075, "LOGO buffer:"); 2690 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a, 2691 elsio->u.els_logo.els_logo_pyld, 2692 sizeof(*elsio->u.els_logo.els_logo_pyld)); 2693 2694 rval = qla2x00_start_sp(sp); 2695 if (rval != QLA_SUCCESS) { 2696 sp->free(sp); 2697 return QLA_FUNCTION_FAILED; 2698 } 2699 2700 ql_dbg(ql_dbg_io, vha, 0x3074, 2701 "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n", 2702 sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain, 2703 
fcport->d_id.b.area, fcport->d_id.b.al_pa); 2704 2705 wait_for_completion(&elsio->u.els_logo.comp); 2706 2707 sp->free(sp); 2708 return rval; 2709 } 2710 2711 static void 2712 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) 2713 { 2714 scsi_qla_host_t *vha = sp->vha; 2715 struct srb_iocb *elsio = &sp->u.iocb_cmd; 2716 2717 els_iocb->entry_type = ELS_IOCB_TYPE; 2718 els_iocb->entry_count = 1; 2719 els_iocb->sys_define = 0; 2720 els_iocb->entry_status = 0; 2721 els_iocb->handle = sp->handle; 2722 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2723 els_iocb->tx_dsd_count = cpu_to_le16(1); 2724 els_iocb->vp_index = vha->vp_idx; 2725 els_iocb->sof_type = EST_SOFI3; 2726 els_iocb->rx_dsd_count = 0; 2727 els_iocb->opcode = elsio->u.els_logo.els_cmd; 2728 2729 els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa; 2730 els_iocb->d_id[1] = sp->fcport->d_id.b.area; 2731 els_iocb->d_id[2] = sp->fcport->d_id.b.domain; 2732 /* For SID the byte order is different than DID */ 2733 els_iocb->s_id[1] = vha->d_id.b.al_pa; 2734 els_iocb->s_id[2] = vha->d_id.b.area; 2735 els_iocb->s_id[0] = vha->d_id.b.domain; 2736 2737 if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) { 2738 els_iocb->control_flags = 0; 2739 els_iocb->tx_byte_count = els_iocb->tx_len = 2740 cpu_to_le32(sizeof(struct els_plogi_payload)); 2741 put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma, 2742 &els_iocb->tx_address); 2743 els_iocb->rx_dsd_count = cpu_to_le16(1); 2744 els_iocb->rx_byte_count = els_iocb->rx_len = 2745 cpu_to_le32(sizeof(struct els_plogi_payload)); 2746 put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma, 2747 &els_iocb->rx_address); 2748 2749 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073, 2750 "PLOGI ELS IOCB:\n"); 2751 ql_dump_buffer(ql_log_info, vha, 0x0109, 2752 (uint8_t *)els_iocb, 2753 sizeof(*els_iocb)); 2754 } else { 2755 els_iocb->control_flags = cpu_to_le16(1 << 13); 2756 els_iocb->tx_byte_count = 2757 cpu_to_le32(sizeof(struct els_logo_payload)); 2758 put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma, 2759 &els_iocb->tx_address); 2760 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload)); 2761 2762 els_iocb->rx_byte_count = 0; 2763 els_iocb->rx_address = 0; 2764 els_iocb->rx_len = 0; 2765 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076, 2766 "LOGO ELS IOCB:"); 2767 ql_dump_buffer(ql_log_info, vha, 0x010b, 2768 els_iocb, 2769 sizeof(*els_iocb)); 2770 } 2771 2772 sp->vha->qla_stats.control_requests++; 2773 } 2774 2775 static void 2776 qla2x00_els_dcmd2_iocb_timeout(void *data) 2777 { 2778 srb_t *sp = data; 2779 fc_port_t *fcport = sp->fcport; 2780 struct scsi_qla_host *vha = sp->vha; 2781 unsigned long flags = 0; 2782 int res, h; 2783 2784 ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069, 2785 "%s hdl=%x ELS Timeout, %8phC portid=%06x\n", 2786 sp->name, sp->handle, fcport->port_name, fcport->d_id.b24); 2787 2788 /* Abort the exchange */ 2789 res = qla24xx_async_abort_cmd(sp, false); 2790 ql_dbg(ql_dbg_io, vha, 0x3070, 2791 "mbx abort_command %s\n", 2792 (res == QLA_SUCCESS) ? 
"successful" : "failed"); 2793 if (res) { 2794 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); 2795 for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { 2796 if (sp->qpair->req->outstanding_cmds[h] == sp) { 2797 sp->qpair->req->outstanding_cmds[h] = NULL; 2798 break; 2799 } 2800 } 2801 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); 2802 sp->done(sp, QLA_FUNCTION_TIMEOUT); 2803 } 2804 } 2805 2806 void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi) 2807 { 2808 if (els_plogi->els_plogi_pyld) 2809 dma_free_coherent(&vha->hw->pdev->dev, 2810 els_plogi->tx_size, 2811 els_plogi->els_plogi_pyld, 2812 els_plogi->els_plogi_pyld_dma); 2813 2814 if (els_plogi->els_resp_pyld) 2815 dma_free_coherent(&vha->hw->pdev->dev, 2816 els_plogi->rx_size, 2817 els_plogi->els_resp_pyld, 2818 els_plogi->els_resp_pyld_dma); 2819 } 2820 2821 static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) 2822 { 2823 fc_port_t *fcport = sp->fcport; 2824 struct srb_iocb *lio = &sp->u.iocb_cmd; 2825 struct scsi_qla_host *vha = sp->vha; 2826 struct event_arg ea; 2827 struct qla_work_evt *e; 2828 struct fc_port *conflict_fcport; 2829 port_id_t cid; /* conflict Nport id */ 2830 const __le32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status; 2831 u16 lid; 2832 2833 ql_dbg(ql_dbg_disc, vha, 0x3072, 2834 "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n", 2835 sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name); 2836 2837 fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE); 2838 del_timer(&sp->u.iocb_cmd.timer); 2839 2840 if (sp->flags & SRB_WAKEUP_ON_COMP) 2841 complete(&lio->u.els_plogi.comp); 2842 else { 2843 switch (le32_to_cpu(fw_status[0])) { 2844 case CS_DATA_UNDERRUN: 2845 case CS_COMPLETE: 2846 memset(&ea, 0, sizeof(ea)); 2847 ea.fcport = fcport; 2848 ea.rc = res; 2849 qla_handle_els_plogi_done(vha, &ea); 2850 break; 2851 2852 case CS_IOCB_ERROR: 2853 switch (le32_to_cpu(fw_status[1])) { 2854 case LSC_SCODE_PORTID_USED: 2855 lid = le32_to_cpu(fw_status[2]) & 0xffff; 2856 qlt_find_sess_invalidate_other(vha, 2857 wwn_to_u64(fcport->port_name), 2858 fcport->d_id, lid, &conflict_fcport); 2859 if (conflict_fcport) { 2860 /* 2861 * Another fcport shares the same 2862 * loop_id & nport id; conflict 2863 * fcport needs to finish cleanup 2864 * before this fcport can proceed 2865 * to login. 
2866 */ 2867 conflict_fcport->conflict = fcport; 2868 fcport->login_pause = 1; 2869 ql_dbg(ql_dbg_disc, vha, 0x20ed, 2870 "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n", 2871 __func__, __LINE__, 2872 fcport->port_name, 2873 fcport->d_id.b24, lid); 2874 } else { 2875 ql_dbg(ql_dbg_disc, vha, 0x20ed, 2876 "%s %d %8phC pid %06x inuse with lid %#x sched del\n", 2877 __func__, __LINE__, 2878 fcport->port_name, 2879 fcport->d_id.b24, lid); 2880 qla2x00_clear_loop_id(fcport); 2881 set_bit(lid, vha->hw->loop_id_map); 2882 fcport->loop_id = lid; 2883 fcport->keep_nport_handle = 0; 2884 qlt_schedule_sess_for_deletion(fcport); 2885 } 2886 break; 2887 2888 case LSC_SCODE_NPORT_USED: 2889 cid.b.domain = (le32_to_cpu(fw_status[2]) >> 16) 2890 & 0xff; 2891 cid.b.area = (le32_to_cpu(fw_status[2]) >> 8) 2892 & 0xff; 2893 cid.b.al_pa = le32_to_cpu(fw_status[2]) & 0xff; 2894 cid.b.rsvd_1 = 0; 2895 2896 ql_dbg(ql_dbg_disc, vha, 0x20ec, 2897 "%s %d %8phC lid %#x in use with pid %06x post gnl\n", 2898 __func__, __LINE__, fcport->port_name, 2899 fcport->loop_id, cid.b24); 2900 set_bit(fcport->loop_id, 2901 vha->hw->loop_id_map); 2902 fcport->loop_id = FC_NO_LOOP_ID; 2903 qla24xx_post_gnl_work(vha, fcport); 2904 break; 2905 2906 case LSC_SCODE_NOXCB: 2907 vha->hw->exch_starvation++; 2908 if (vha->hw->exch_starvation > 5) { 2909 ql_log(ql_log_warn, vha, 0xd046, 2910 "Exchange starvation. Resetting RISC\n"); 2911 vha->hw->exch_starvation = 0; 2912 set_bit(ISP_ABORT_NEEDED, 2913 &vha->dpc_flags); 2914 qla2xxx_wake_dpc(vha); 2915 } 2916 fallthrough; 2917 default: 2918 ql_dbg(ql_dbg_disc, vha, 0x20eb, 2919 "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n", 2920 __func__, sp->fcport->port_name, 2921 fw_status[0], fw_status[1], fw_status[2]); 2922 2923 fcport->flags &= ~FCF_ASYNC_SENT; 2924 qla2x00_set_fcport_disc_state(fcport, 2925 DSC_LOGIN_FAILED); 2926 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 2927 break; 2928 } 2929 break; 2930 2931 default: 2932 ql_dbg(ql_dbg_disc, vha, 0x20eb, 2933 "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n", 2934 __func__, sp->fcport->port_name, 2935 fw_status[0], fw_status[1], fw_status[2]); 2936 2937 sp->fcport->flags &= ~FCF_ASYNC_SENT; 2938 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_FAILED); 2939 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 2940 break; 2941 } 2942 2943 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP); 2944 if (!e) { 2945 struct srb_iocb *elsio = &sp->u.iocb_cmd; 2946 2947 qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi); 2948 sp->free(sp); 2949 return; 2950 } 2951 e->u.iosb.sp = sp; 2952 qla2x00_post_work(vha, e); 2953 } 2954 } 2955 2956 int 2957 qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, 2958 fc_port_t *fcport, bool wait) 2959 { 2960 srb_t *sp; 2961 struct srb_iocb *elsio = NULL; 2962 struct qla_hw_data *ha = vha->hw; 2963 int rval = QLA_SUCCESS; 2964 void *ptr, *resp_ptr; 2965 2966 /* Alloc SRB structure */ 2967 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 2968 if (!sp) { 2969 ql_log(ql_log_info, vha, 0x70e6, 2970 "SRB allocation failed\n"); 2971 fcport->flags &= ~FCF_ASYNC_ACTIVE; 2972 return -ENOMEM; 2973 } 2974 2975 fcport->flags |= FCF_ASYNC_SENT; 2976 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND); 2977 elsio = &sp->u.iocb_cmd; 2978 ql_dbg(ql_dbg_io, vha, 0x3073, 2979 "Enter: PLOGI portid=%06x\n", fcport->d_id.b24); 2980 2981 sp->type = SRB_ELS_DCMD; 2982 sp->name = "ELS_DCMD"; 2983 sp->fcport = fcport; 2984 2985 elsio->timeout = qla2x00_els_dcmd2_iocb_timeout; 2986 if (wait) 2987 sp->flags = SRB_WAKEUP_ON_COMP; 2988 2989 
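/* Arm the SRB timer with a small cushion over the standard ELS timeout. */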
qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2); 2990 2991 sp->done = qla2x00_els_dcmd2_sp_done; 2992 elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE; 2993 2994 ptr = elsio->u.els_plogi.els_plogi_pyld = 2995 dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.tx_size, 2996 &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL); 2997 2998 if (!elsio->u.els_plogi.els_plogi_pyld) { 2999 rval = QLA_FUNCTION_FAILED; 3000 goto out; 3001 } 3002 3003 resp_ptr = elsio->u.els_plogi.els_resp_pyld = 3004 dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.rx_size, 3005 &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL); 3006 3007 if (!elsio->u.els_plogi.els_resp_pyld) { 3008 rval = QLA_FUNCTION_FAILED; 3009 goto out; 3010 } 3011 3012 ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr); 3013 3014 memset(ptr, 0, sizeof(struct els_plogi_payload)); 3015 memset(resp_ptr, 0, sizeof(struct els_plogi_payload)); 3016 memcpy(elsio->u.els_plogi.els_plogi_pyld->data, 3017 &ha->plogi_els_payld.fl_csp, LOGIN_TEMPLATE_SIZE); 3018 3019 elsio->u.els_plogi.els_cmd = els_opcode; 3020 elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode; 3021 3022 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n"); 3023 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109, 3024 (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 3025 sizeof(*elsio->u.els_plogi.els_plogi_pyld)); 3026 3027 init_completion(&elsio->u.els_plogi.comp); 3028 rval = qla2x00_start_sp(sp); 3029 if (rval != QLA_SUCCESS) { 3030 rval = QLA_FUNCTION_FAILED; 3031 } else { 3032 ql_dbg(ql_dbg_disc, vha, 0x3074, 3033 "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n", 3034 sp->name, sp->handle, fcport->loop_id, 3035 fcport->d_id.b24, vha->d_id.b24); 3036 } 3037 3038 if (wait) { 3039 wait_for_completion(&elsio->u.els_plogi.comp); 3040 3041 if (elsio->u.els_plogi.comp_status != CS_COMPLETE) 3042 rval = QLA_FUNCTION_FAILED; 3043 } else { 3044 goto done; 3045 } 3046 3047 out: 3048 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); 3049 qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi); 3050 sp->free(sp); 3051 done: 3052 return rval; 3053 } 3054 3055 static void 3056 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) 3057 { 3058 struct bsg_job *bsg_job = sp->u.bsg_job; 3059 struct fc_bsg_request *bsg_request = bsg_job->request; 3060 3061 els_iocb->entry_type = ELS_IOCB_TYPE; 3062 els_iocb->entry_count = 1; 3063 els_iocb->sys_define = 0; 3064 els_iocb->entry_status = 0; 3065 els_iocb->handle = sp->handle; 3066 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3067 els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt); 3068 els_iocb->vp_index = sp->vha->vp_idx; 3069 els_iocb->sof_type = EST_SOFI3; 3070 els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt); 3071 3072 els_iocb->opcode = 3073 sp->type == SRB_ELS_CMD_RPT ? 
3074 bsg_request->rqst_data.r_els.els_code : 3075 bsg_request->rqst_data.h_els.command_code; 3076 els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa; 3077 els_iocb->d_id[1] = sp->fcport->d_id.b.area; 3078 els_iocb->d_id[2] = sp->fcport->d_id.b.domain; 3079 els_iocb->control_flags = 0; 3080 els_iocb->rx_byte_count = 3081 cpu_to_le32(bsg_job->reply_payload.payload_len); 3082 els_iocb->tx_byte_count = 3083 cpu_to_le32(bsg_job->request_payload.payload_len); 3084 3085 put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list), 3086 &els_iocb->tx_address); 3087 els_iocb->tx_len = cpu_to_le32(sg_dma_len 3088 (bsg_job->request_payload.sg_list)); 3089 3090 put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list), 3091 &els_iocb->rx_address); 3092 els_iocb->rx_len = cpu_to_le32(sg_dma_len 3093 (bsg_job->reply_payload.sg_list)); 3094 3095 sp->vha->qla_stats.control_requests++; 3096 } 3097 3098 static void 3099 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb) 3100 { 3101 uint16_t avail_dsds; 3102 struct dsd64 *cur_dsd; 3103 struct scatterlist *sg; 3104 int index; 3105 uint16_t tot_dsds; 3106 scsi_qla_host_t *vha = sp->vha; 3107 struct qla_hw_data *ha = vha->hw; 3108 struct bsg_job *bsg_job = sp->u.bsg_job; 3109 int entry_count = 1; 3110 3111 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t)); 3112 ct_iocb->entry_type = CT_IOCB_TYPE; 3113 ct_iocb->entry_status = 0; 3114 ct_iocb->handle1 = sp->handle; 3115 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id); 3116 ct_iocb->status = cpu_to_le16(0); 3117 ct_iocb->control_flags = cpu_to_le16(0); 3118 ct_iocb->timeout = 0; 3119 ct_iocb->cmd_dsd_count = 3120 cpu_to_le16(bsg_job->request_payload.sg_cnt); 3121 ct_iocb->total_dsd_count = 3122 cpu_to_le16(bsg_job->request_payload.sg_cnt + 1); 3123 ct_iocb->req_bytecount = 3124 cpu_to_le32(bsg_job->request_payload.payload_len); 3125 ct_iocb->rsp_bytecount = 3126 cpu_to_le32(bsg_job->reply_payload.payload_len); 3127 3128 put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list), 3129 &ct_iocb->req_dsd.address); 3130 ct_iocb->req_dsd.length = ct_iocb->req_bytecount; 3131 3132 put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list), 3133 &ct_iocb->rsp_dsd.address); 3134 ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount; 3135 3136 avail_dsds = 1; 3137 cur_dsd = &ct_iocb->rsp_dsd; 3138 index = 0; 3139 tot_dsds = bsg_job->reply_payload.sg_cnt; 3140 3141 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) { 3142 cont_a64_entry_t *cont_pkt; 3143 3144 /* Allocate additional continuation packets? */ 3145 if (avail_dsds == 0) { 3146 /* 3147 * Five DSDs are available in the Cont. 3148 * Type 1 IOCB. 
3149 */ 3150 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, 3151 vha->hw->req_q_map[0]); 3152 cur_dsd = cont_pkt->dsd; 3153 avail_dsds = 5; 3154 entry_count++; 3155 } 3156 3157 append_dsd64(&cur_dsd, sg); 3158 avail_dsds--; 3159 } 3160 ct_iocb->entry_count = entry_count; 3161 3162 sp->vha->qla_stats.control_requests++; 3163 } 3164 3165 static void 3166 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) 3167 { 3168 uint16_t avail_dsds; 3169 struct dsd64 *cur_dsd; 3170 struct scatterlist *sg; 3171 int index; 3172 uint16_t cmd_dsds, rsp_dsds; 3173 scsi_qla_host_t *vha = sp->vha; 3174 struct qla_hw_data *ha = vha->hw; 3175 struct bsg_job *bsg_job = sp->u.bsg_job; 3176 int entry_count = 1; 3177 cont_a64_entry_t *cont_pkt = NULL; 3178 3179 ct_iocb->entry_type = CT_IOCB_TYPE; 3180 ct_iocb->entry_status = 0; 3181 ct_iocb->sys_define = 0; 3182 ct_iocb->handle = sp->handle; 3183 3184 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3185 ct_iocb->vp_index = sp->vha->vp_idx; 3186 ct_iocb->comp_status = cpu_to_le16(0); 3187 3188 cmd_dsds = bsg_job->request_payload.sg_cnt; 3189 rsp_dsds = bsg_job->reply_payload.sg_cnt; 3190 3191 ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds); 3192 ct_iocb->timeout = 0; 3193 ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds); 3194 ct_iocb->cmd_byte_count = 3195 cpu_to_le32(bsg_job->request_payload.payload_len); 3196 3197 avail_dsds = 2; 3198 cur_dsd = ct_iocb->dsd; 3199 index = 0; 3200 3201 for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) { 3202 /* Allocate additional continuation packets? */ 3203 if (avail_dsds == 0) { 3204 /* 3205 * Five DSDs are available in the Cont. 3206 * Type 1 IOCB. 3207 */ 3208 cont_pkt = qla2x00_prep_cont_type1_iocb( 3209 vha, ha->req_q_map[0]); 3210 cur_dsd = cont_pkt->dsd; 3211 avail_dsds = 5; 3212 entry_count++; 3213 } 3214 3215 append_dsd64(&cur_dsd, sg); 3216 avail_dsds--; 3217 } 3218 3219 index = 0; 3220 3221 for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) { 3222 /* Allocate additional continuation packets? */ 3223 if (avail_dsds == 0) { 3224 /* 3225 * Five DSDs are available in the Cont. 3226 * Type 1 IOCB. 3227 */ 3228 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, 3229 ha->req_q_map[0]); 3230 cur_dsd = cont_pkt->dsd; 3231 avail_dsds = 5; 3232 entry_count++; 3233 } 3234 3235 append_dsd64(&cur_dsd, sg); 3236 avail_dsds--; 3237 } 3238 ct_iocb->entry_count = entry_count; 3239 } 3240 3241 /* 3242 * qla82xx_start_scsi() - Send a SCSI command to the ISP 3243 * @sp: command to send to the ISP 3244 * 3245 * Returns non-zero if a failure occurred, else zero. 3246 */ 3247 int 3248 qla82xx_start_scsi(srb_t *sp) 3249 { 3250 int nseg; 3251 unsigned long flags; 3252 struct scsi_cmnd *cmd; 3253 uint32_t *clr_ptr; 3254 uint32_t handle; 3255 uint16_t cnt; 3256 uint16_t req_cnt; 3257 uint16_t tot_dsds; 3258 struct device_reg_82xx __iomem *reg; 3259 uint32_t dbval; 3260 __be32 *fcp_dl; 3261 uint8_t additional_cdb_len; 3262 struct ct6_dsd *ctx; 3263 struct scsi_qla_host *vha = sp->vha; 3264 struct qla_hw_data *ha = vha->hw; 3265 struct req_que *req = NULL; 3266 struct rsp_que *rsp = NULL; 3267 3268 /* Setup device pointers. 
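 * ISP82xx submits on the base request queue and rings the doorbell through
 * nxdb_wr_ptr rather than writing req_q_in.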
*/ 3269 reg = &ha->iobase->isp82;
3270 cmd = GET_CMD_SP(sp);
3271 req = vha->req;
3272 rsp = ha->rsp_q_map[0];
3273
3274 /* So we know we haven't pci_map'ed anything yet */
3275 tot_dsds = 0;
3276
3277 dbval = 0x04 | (ha->portnum << 5);
3278
3279 /* Send marker if required */
3280 if (vha->marker_needed != 0) {
3281 if (qla2x00_marker(vha, ha->base_qpair,
3282 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
3283 ql_log(ql_log_warn, vha, 0x300c,
3284 "qla2x00_marker failed for cmd=%p.\n", cmd);
3285 return QLA_FUNCTION_FAILED;
3286 }
3287 vha->marker_needed = 0;
3288 }
3289
3290 /* Acquire ring specific lock */
3291 spin_lock_irqsave(&ha->hardware_lock, flags);
3292
3293 handle = qla2xxx_get_next_handle(req);
3294 if (handle == 0)
3295 goto queuing_error;
3296
3297 /* Map the sg table so we have an accurate count of sg entries needed */
3298 if (scsi_sg_count(cmd)) {
3299 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3300 scsi_sg_count(cmd), cmd->sc_data_direction);
3301 if (unlikely(!nseg))
3302 goto queuing_error;
3303 } else
3304 nseg = 0;
3305
3306 tot_dsds = nseg;
3307
3308 if (tot_dsds > ql2xshiftctondsd) {
3309 struct cmd_type_6 *cmd_pkt;
3310 uint16_t more_dsd_lists = 0;
3311 struct dsd_dma *dsd_ptr;
3312 uint16_t i;
3313
3314 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3315 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
3316 ql_dbg(ql_dbg_io, vha, 0x300d,
3317 "Num of DSD list %d is more than %d for cmd=%p.\n",
3318 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
3319 cmd);
3320 goto queuing_error;
3321 }
3322
3323 if (more_dsd_lists <= ha->gbl_dsd_avail)
3324 goto sufficient_dsds;
3325 else
3326 more_dsd_lists -= ha->gbl_dsd_avail;
3327
3328 for (i = 0; i < more_dsd_lists; i++) {
3329 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3330 if (!dsd_ptr) {
3331 ql_log(ql_log_fatal, vha, 0x300e,
3332 "Failed to allocate memory for dsd_dma "
3333 "for cmd=%p.\n", cmd);
3334 goto queuing_error;
3335 }
3336
3337 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3338 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3339 if (!dsd_ptr->dsd_addr) {
3340 kfree(dsd_ptr);
3341 ql_log(ql_log_fatal, vha, 0x300f,
3342 "Failed to allocate memory for dsd_addr "
3343 "for cmd=%p.\n", cmd);
3344 goto queuing_error;
3345 }
3346 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
3347 ha->gbl_dsd_avail++;
3348 }
3349
3350 sufficient_dsds:
3351 req_cnt = 1;
3352
3353 if (req->cnt < (req_cnt + 2)) {
3354 cnt = (uint16_t)rd_reg_dword_relaxed(
3355 &reg->req_q_out[0]);
3356 if (req->ring_index < cnt)
3357 req->cnt = cnt - req->ring_index;
3358 else
3359 req->cnt = req->length -
3360 (req->ring_index - cnt);
3361 if (req->cnt < (req_cnt + 2))
3362 goto queuing_error;
3363 }
3364
3365 ctx = sp->u.scmd.ct6_ctx =
3366 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
3367 if (!ctx) {
3368 ql_log(ql_log_fatal, vha, 0x3010,
3369 "Failed to allocate ctx for cmd=%p.\n", cmd);
3370 goto queuing_error;
3371 }
3372
3373 memset(ctx, 0, sizeof(struct ct6_dsd));
3374 ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
3375 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3376 if (!ctx->fcp_cmnd) {
3377 ql_log(ql_log_fatal, vha, 0x3011,
3378 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
3379 goto queuing_error;
3380 }
3381
3382 /* Initialize the DSD list and dma handle */
3383 INIT_LIST_HEAD(&ctx->dsd_list);
3384 ctx->dsd_use_cnt = 0;
3385
3386 if (cmd->cmd_len > 16) {
3387 additional_cdb_len = cmd->cmd_len - 16;
3388 if ((cmd->cmd_len % 4) != 0) {
3389 /* SCSI command bigger than 16 bytes must be
3390 * a multiple of 4
3391 */
3392
ql_log(ql_log_warn, vha, 0x3012,
3393 "scsi cmd len %d not multiple of 4 "
3394 "for cmd=%p.\n", cmd->cmd_len, cmd);
3395 goto queuing_error_fcp_cmnd;
3396 }
3397 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3398 } else {
3399 additional_cdb_len = 0;
3400 ctx->fcp_cmnd_len = 12 + 16 + 4;
3401 }
3402
3403 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3404 cmd_pkt->handle = make_handle(req->id, handle);
3405
3406 /* Zero out remaining portion of packet. */
3407 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
3408 clr_ptr = (uint32_t *)cmd_pkt + 2;
3409 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3410 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3411
3412 /* Set NPORT-ID and LUN number*/
3413 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3414 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3415 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3416 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3417 cmd_pkt->vp_index = sp->vha->vp_idx;
3418
3419 /* Build IOCB segments */
3420 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3421 goto queuing_error_fcp_cmnd;
3422
3423 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3424 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3425
3426 /* build FCP_CMND IU */
3427 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
3428 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3429
3430 if (cmd->sc_data_direction == DMA_TO_DEVICE)
3431 ctx->fcp_cmnd->additional_cdb_len |= 1;
3432 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3433 ctx->fcp_cmnd->additional_cdb_len |= 2;
3434
3435 /* Populate the FCP_PRIO. */
3436 if (ha->flags.fcp_prio_enabled)
3437 ctx->fcp_cmnd->task_attribute |=
3438 sp->fcport->fcp_prio << 3;
3439
3440 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3441
3442 fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
3443 additional_cdb_len);
3444 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
3445
3446 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3447 put_unaligned_le64(ctx->fcp_cmnd_dma,
3448 &cmd_pkt->fcp_cmnd_dseg_address);
3449
3450 sp->flags |= SRB_FCP_CMND_DMA_VALID;
3451 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3452 /* Set total data segment count. */
3453 cmd_pkt->entry_count = (uint8_t)req_cnt;
3454 /* Specify response queue number where
3455 * completion should happen
3456 */
3457 cmd_pkt->entry_status = (uint8_t) rsp->id;
3458 } else {
3459 struct cmd_type_7 *cmd_pkt;
3460
3461 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3462 if (req->cnt < (req_cnt + 2)) {
3463 cnt = (uint16_t)rd_reg_dword_relaxed(
3464 &reg->req_q_out[0]);
3465 if (req->ring_index < cnt)
3466 req->cnt = cnt - req->ring_index;
3467 else
3468 req->cnt = req->length -
3469 (req->ring_index - cnt);
3470 }
3471 if (req->cnt < (req_cnt + 2))
3472 goto queuing_error;
3473
3474 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3475 cmd_pkt->handle = make_handle(req->id, handle);
3476
3477 /* Zero out remaining portion of packet.
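 * Everything past the first 8 bytes (entry header and handle) is cleared
 * so stale ring contents never reach the firmware.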
*/ 3478 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/ 3479 clr_ptr = (uint32_t *)cmd_pkt + 2; 3480 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 3481 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 3482 3483 /* Set NPORT-ID and LUN number*/ 3484 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3485 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 3486 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 3487 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 3488 cmd_pkt->vp_index = sp->vha->vp_idx; 3489 3490 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 3491 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, 3492 sizeof(cmd_pkt->lun)); 3493 3494 /* Populate the FCP_PRIO. */ 3495 if (ha->flags.fcp_prio_enabled) 3496 cmd_pkt->task |= sp->fcport->fcp_prio << 3; 3497 3498 /* Load SCSI command packet. */ 3499 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); 3500 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); 3501 3502 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 3503 3504 /* Build IOCB segments */ 3505 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req); 3506 3507 /* Set total data segment count. */ 3508 cmd_pkt->entry_count = (uint8_t)req_cnt; 3509 /* Specify response queue number where 3510 * completion should happen. 3511 */ 3512 cmd_pkt->entry_status = (uint8_t) rsp->id; 3513 3514 } 3515 /* Build command packet. */ 3516 req->current_outstanding_cmd = handle; 3517 req->outstanding_cmds[handle] = sp; 3518 sp->handle = handle; 3519 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 3520 req->cnt -= req_cnt; 3521 wmb(); 3522 3523 /* Adjust ring index. */ 3524 req->ring_index++; 3525 if (req->ring_index == req->length) { 3526 req->ring_index = 0; 3527 req->ring_ptr = req->ring; 3528 } else 3529 req->ring_ptr++; 3530 3531 sp->flags |= SRB_DMA_VALID; 3532 3533 /* Set chip new ring index. */ 3534 /* write, read and verify logic */ 3535 dbval = dbval | (req->id << 8) | (req->ring_index << 16); 3536 if (ql2xdbwr) 3537 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval); 3538 else { 3539 wrt_reg_dword(ha->nxdb_wr_ptr, dbval); 3540 wmb(); 3541 while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) { 3542 wrt_reg_dword(ha->nxdb_wr_ptr, dbval); 3543 wmb(); 3544 } 3545 } 3546 3547 /* Manage unprocessed RIO/ZIO commands in response queue. 
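 * With ZIO the firmware can post completions without raising an interrupt,
 * so drain the response queue here while the hardware lock is still held.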
*/ 3548 if (vha->flags.process_response_queue && 3549 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 3550 qla24xx_process_response_queue(vha, rsp); 3551 3552 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3553 return QLA_SUCCESS; 3554 3555 queuing_error_fcp_cmnd: 3556 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma); 3557 queuing_error: 3558 if (tot_dsds) 3559 scsi_dma_unmap(cmd); 3560 3561 if (sp->u.scmd.crc_ctx) { 3562 mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool); 3563 sp->u.scmd.crc_ctx = NULL; 3564 } 3565 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3566 3567 return QLA_FUNCTION_FAILED; 3568 } 3569 3570 static void 3571 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb) 3572 { 3573 struct srb_iocb *aio = &sp->u.iocb_cmd; 3574 scsi_qla_host_t *vha = sp->vha; 3575 struct req_que *req = sp->qpair->req; 3576 srb_t *orig_sp = sp->cmd_sp; 3577 3578 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx)); 3579 abt_iocb->entry_type = ABORT_IOCB_TYPE; 3580 abt_iocb->entry_count = 1; 3581 abt_iocb->handle = make_handle(req->id, sp->handle); 3582 if (sp->fcport) { 3583 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3584 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; 3585 abt_iocb->port_id[1] = sp->fcport->d_id.b.area; 3586 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain; 3587 } 3588 abt_iocb->handle_to_abort = 3589 make_handle(le16_to_cpu(aio->u.abt.req_que_no), 3590 aio->u.abt.cmd_hndl); 3591 abt_iocb->vp_index = vha->vp_idx; 3592 abt_iocb->req_que_no = aio->u.abt.req_que_no; 3593 3594 /* need to pass original sp */ 3595 if (orig_sp) 3596 qla_nvme_abort_set_option(abt_iocb, orig_sp); 3597 3598 /* Send the command to the firmware */ 3599 wmb(); 3600 } 3601 3602 static void 3603 qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx) 3604 { 3605 int i, sz; 3606 3607 mbx->entry_type = MBX_IOCB_TYPE; 3608 mbx->handle = sp->handle; 3609 sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb)); 3610 3611 for (i = 0; i < sz; i++) 3612 mbx->mb[i] = sp->u.iocb_cmd.u.mbx.out_mb[i]; 3613 } 3614 3615 static void 3616 qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt) 3617 { 3618 sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt; 3619 qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg); 3620 ct_pkt->handle = sp->handle; 3621 } 3622 3623 static void qla2x00_send_notify_ack_iocb(srb_t *sp, 3624 struct nack_to_isp *nack) 3625 { 3626 struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy; 3627 3628 nack->entry_type = NOTIFY_ACK_TYPE; 3629 nack->entry_count = 1; 3630 nack->ox_id = ntfy->ox_id; 3631 3632 nack->u.isp24.handle = sp->handle; 3633 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; 3634 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { 3635 nack->u.isp24.flags = ntfy->u.isp24.flags & 3636 cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB); 3637 } 3638 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; 3639 nack->u.isp24.status = ntfy->u.isp24.status; 3640 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; 3641 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; 3642 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; 3643 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; 3644 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; 3645 nack->u.isp24.srr_flags = 0; 3646 nack->u.isp24.srr_reject_code = 0; 3647 nack->u.isp24.srr_reject_code_expl = 0; 3648 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; 3649 } 3650 3651 /* 3652 * Build NVME LS request 3653 */ 3654 static void 3655 qla_nvme_ls(srb_t 
*sp, struct pt_ls4_request *cmd_pkt) 3656 { 3657 struct srb_iocb *nvme; 3658 3659 nvme = &sp->u.iocb_cmd; 3660 cmd_pkt->entry_type = PT_LS4_REQUEST; 3661 cmd_pkt->entry_count = 1; 3662 cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT); 3663 3664 cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec); 3665 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3666 cmd_pkt->vp_index = sp->fcport->vha->vp_idx; 3667 3668 cmd_pkt->tx_dseg_count = cpu_to_le16(1); 3669 cmd_pkt->tx_byte_count = cpu_to_le32(nvme->u.nvme.cmd_len); 3670 cmd_pkt->dsd[0].length = cpu_to_le32(nvme->u.nvme.cmd_len); 3671 put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address); 3672 3673 cmd_pkt->rx_dseg_count = cpu_to_le16(1); 3674 cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len); 3675 cmd_pkt->dsd[1].length = cpu_to_le32(nvme->u.nvme.rsp_len); 3676 put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address); 3677 } 3678 3679 static void 3680 qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce) 3681 { 3682 int map, pos; 3683 3684 vce->entry_type = VP_CTRL_IOCB_TYPE; 3685 vce->handle = sp->handle; 3686 vce->entry_count = 1; 3687 vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd); 3688 vce->vp_count = cpu_to_le16(1); 3689 3690 /* 3691 * index map in firmware starts with 1; decrement index 3692 * this is ok as we never use index 0 3693 */ 3694 map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8; 3695 pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7; 3696 vce->vp_idx_map[map] |= 1 << pos; 3697 } 3698 3699 static void 3700 qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio) 3701 { 3702 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 3703 logio->control_flags = 3704 cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO); 3705 3706 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3707 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 3708 logio->port_id[1] = sp->fcport->d_id.b.area; 3709 logio->port_id[2] = sp->fcport->d_id.b.domain; 3710 logio->vp_index = sp->fcport->vha->vp_idx; 3711 } 3712 3713 int 3714 qla2x00_start_sp(srb_t *sp) 3715 { 3716 int rval = QLA_SUCCESS; 3717 scsi_qla_host_t *vha = sp->vha; 3718 struct qla_hw_data *ha = vha->hw; 3719 struct qla_qpair *qp = sp->qpair; 3720 void *pkt; 3721 unsigned long flags; 3722 3723 spin_lock_irqsave(qp->qp_lock_ptr, flags); 3724 pkt = __qla2x00_alloc_iocbs(sp->qpair, sp); 3725 if (!pkt) { 3726 rval = EAGAIN; 3727 ql_log(ql_log_warn, vha, 0x700c, 3728 "qla2x00_alloc_iocbs failed.\n"); 3729 goto done; 3730 } 3731 3732 switch (sp->type) { 3733 case SRB_LOGIN_CMD: 3734 IS_FWI2_CAPABLE(ha) ? 3735 qla24xx_login_iocb(sp, pkt) : 3736 qla2x00_login_iocb(sp, pkt); 3737 break; 3738 case SRB_PRLI_CMD: 3739 qla24xx_prli_iocb(sp, pkt); 3740 break; 3741 case SRB_LOGOUT_CMD: 3742 IS_FWI2_CAPABLE(ha) ? 3743 qla24xx_logout_iocb(sp, pkt) : 3744 qla2x00_logout_iocb(sp, pkt); 3745 break; 3746 case SRB_ELS_CMD_RPT: 3747 case SRB_ELS_CMD_HST: 3748 qla24xx_els_iocb(sp, pkt); 3749 break; 3750 case SRB_CT_CMD: 3751 IS_FWI2_CAPABLE(ha) ? 3752 qla24xx_ct_iocb(sp, pkt) : 3753 qla2x00_ct_iocb(sp, pkt); 3754 break; 3755 case SRB_ADISC_CMD: 3756 IS_FWI2_CAPABLE(ha) ? 3757 qla24xx_adisc_iocb(sp, pkt) : 3758 qla2x00_adisc_iocb(sp, pkt); 3759 break; 3760 case SRB_TM_CMD: 3761 IS_QLAFX00(ha) ? 
3762 qlafx00_tm_iocb(sp, pkt) :
3763 qla24xx_tm_iocb(sp, pkt);
3764 break;
3765 case SRB_FXIOCB_DCMD:
3766 case SRB_FXIOCB_BCMD:
3767 qlafx00_fxdisc_iocb(sp, pkt);
3768 break;
3769 case SRB_NVME_LS:
3770 qla_nvme_ls(sp, pkt);
3771 break;
3772 case SRB_ABT_CMD:
3773 IS_QLAFX00(ha) ?
3774 qlafx00_abort_iocb(sp, pkt) :
3775 qla24xx_abort_iocb(sp, pkt);
3776 break;
3777 case SRB_ELS_DCMD:
3778 qla24xx_els_logo_iocb(sp, pkt);
3779 break;
3780 case SRB_CT_PTHRU_CMD:
3781 qla2x00_ctpthru_cmd_iocb(sp, pkt);
3782 break;
3783 case SRB_MB_IOCB:
3784 qla2x00_mb_iocb(sp, pkt);
3785 break;
3786 case SRB_NACK_PLOGI:
3787 case SRB_NACK_PRLI:
3788 case SRB_NACK_LOGO:
3789 qla2x00_send_notify_ack_iocb(sp, pkt);
3790 break;
3791 case SRB_CTRL_VP:
3792 qla25xx_ctrlvp_iocb(sp, pkt);
3793 break;
3794 case SRB_PRLO_CMD:
3795 qla24xx_prlo_iocb(sp, pkt);
3796 break;
3797 default:
3798 break;
3799 }
3800
3801 if (sp->start_timer)
3802 add_timer(&sp->u.iocb_cmd.timer);
3803
3804 wmb();
3805 qla2x00_start_iocbs(vha, qp->req);
3806 done:
3807 spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
3808 return rval;
3809 }
3810
3811 static void
3812 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3813 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3814 {
3815 uint16_t avail_dsds;
3816 struct dsd64 *cur_dsd;
3817 uint32_t req_data_len = 0;
3818 uint32_t rsp_data_len = 0;
3819 struct scatterlist *sg;
3820 int index;
3821 int entry_count = 1;
3822 struct bsg_job *bsg_job = sp->u.bsg_job;
3823
3824 /* Update entry type to indicate bidir command */
3825 put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type);
3826
3827 /* Set the transfer direction; in this case set both flags.
3828 * Also set the BD_WRAP_BACK flag, firmware will take care of
3829 * assigning DID=SID for outgoing pkts.
3830 */
3831 cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3832 cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3833 cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
3834 BD_WRAP_BACK);
3835
3836 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3837 cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3838 cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3839 cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3840
3841 vha->bidi_stats.transfer_bytes += req_data_len;
3842 vha->bidi_stats.io_count++;
3843
3844 vha->qla_stats.output_bytes += req_data_len;
3845 vha->qla_stats.output_requests++;
3846
3847 /* Only one dsd is available for bidirectional IOCB, remaining dsds
3848 * are bundled in continuation iocb
3849 */
3850 avail_dsds = 1;
3851 cur_dsd = &cmd_pkt->fcp_dsd;
3852
3853 index = 0;
3854
3855 for_each_sg(bsg_job->request_payload.sg_list, sg,
3856 bsg_job->request_payload.sg_cnt, index) {
3857 cont_a64_entry_t *cont_pkt;
3858
3859 /* Allocate additional continuation packets */
3860 if (avail_dsds == 0) {
3861 /* Continuation type 1 IOCB can accommodate
3862 * 5 DSDs
3863 */
3864 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3865 cur_dsd = cont_pkt->dsd;
3866 avail_dsds = 5;
3867 entry_count++;
3868 }
3869 append_dsd64(&cur_dsd, sg);
3870 avail_dsds--;
3871 }
3872 /* For a read request the DSDs always go to the continuation IOCB
3873 * and follow the write DSDs. If there is room on the current IOCB
3874 * they are added to that IOCB, else a new continuation IOCB is
3875 * allocated.
3876 */
3877 for_each_sg(bsg_job->reply_payload.sg_list, sg,
3878 bsg_job->reply_payload.sg_cnt, index) {
3879 cont_a64_entry_t *cont_pkt;
3880
3881 /* Allocate additional continuation packets */
3882 if (avail_dsds == 0) {
3883 /* Continuation type 1 IOCB can accommodate
3884 * 5 DSDs
3885 */
3886 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3887 cur_dsd = cont_pkt->dsd;
3888 avail_dsds = 5;
3889 entry_count++;
3890 }
3891 append_dsd64(&cur_dsd, sg);
3892 avail_dsds--;
3893 }
3894 /* This value should be the same as the number of IOCBs required for this cmd */
3895 cmd_pkt->entry_count = entry_count;
3896 }
3897
3898 int
3899 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
3900 {
3901
3902 struct qla_hw_data *ha = vha->hw;
3903 unsigned long flags;
3904 uint32_t handle;
3905 uint16_t req_cnt;
3906 uint16_t cnt;
3907 uint32_t *clr_ptr;
3908 struct cmd_bidir *cmd_pkt = NULL;
3909 struct rsp_que *rsp;
3910 struct req_que *req;
3911 int rval = EXT_STATUS_OK;
3912
3913 rval = QLA_SUCCESS;
3914
3915 rsp = ha->rsp_q_map[0];
3916 req = vha->req;
3917
3918 /* Send marker if required */
3919 if (vha->marker_needed != 0) {
3920 if (qla2x00_marker(vha, ha->base_qpair,
3921 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
3922 return EXT_STATUS_MAILBOX;
3923 vha->marker_needed = 0;
3924 }
3925
3926 /* Acquire ring specific lock */
3927 spin_lock_irqsave(&ha->hardware_lock, flags);
3928
3929 handle = qla2xxx_get_next_handle(req);
3930 if (handle == 0) {
3931 rval = EXT_STATUS_BUSY;
3932 goto queuing_error;
3933 }
3934
3935 /* Calculate number of IOCB required */
3936 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3937
3938 /* Check for room on request queue. */
3939 if (req->cnt < req_cnt + 2) {
3940 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
3941 rd_reg_dword_relaxed(req->req_q_out);
3942 if (req->ring_index < cnt)
3943 req->cnt = cnt - req->ring_index;
3944 else
3945 req->cnt = req->length -
3946 (req->ring_index - cnt);
3947 }
3948 if (req->cnt < req_cnt + 2) {
3949 rval = EXT_STATUS_BUSY;
3950 goto queuing_error;
3951 }
3952
3953 cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
3954 cmd_pkt->handle = make_handle(req->id, handle);
3955
3956 /* Zero out remaining portion of packet. */
3957 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
3958 clr_ptr = (uint32_t *)cmd_pkt + 2;
3959 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3960
3961 /* Set NPORT-ID (of vha) */
3962 cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
3963 cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
3964 cmd_pkt->port_id[1] = vha->d_id.b.area;
3965 cmd_pkt->port_id[2] = vha->d_id.b.domain;
3966
3967 qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
3968 cmd_pkt->entry_status = (uint8_t) rsp->id;
3969 /* Build command packet. */
3970 req->current_outstanding_cmd = handle;
3971 req->outstanding_cmds[handle] = sp;
3972 sp->handle = handle;
3973 req->cnt -= req_cnt;
3974
3975 /* Send the command to the firmware */
3976 wmb();
3977 qla2x00_start_iocbs(vha, req);
3978 queuing_error:
3979 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3980 return rval;
3981 }
3982
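/*
 * Illustrative sketch (not part of the driver build): a minimal example of
 * how a caller is typically expected to drive qla2x00_start_sp() for a
 * control SRB. The helper name example_send_adisc() and its completion
 * callback are assumptions for illustration only; error handling and
 * reference counting are elided.
 *
 *	static void example_adisc_done(srb_t *sp, int res)
 *	{
 *		// Release the SRB once the firmware has completed the IOCB.
 *		sp->free(sp);
 *	}
 *
 *	static int example_send_adisc(scsi_qla_host_t *vha, fc_port_t *fcport)
 *	{
 *		srb_t *sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 *
 *		if (!sp)
 *			return -ENOMEM;
 *		sp->type = SRB_ADISC_CMD;
 *		sp->name = "example_adisc";
 *		qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
 *		sp->done = example_adisc_done;
 *		// qla2x00_start_sp() allocates the IOCB, formats it via the
 *		// SRB-type switch above and rings the request queue.
 *		return qla2x00_start_sp(sp);
 *	}
 */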