// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);

	return (cont_pkt);
}

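/*
 * Worked example for the two calc helpers above (illustrative only): the
 * 32-bit Command Type 2 IOCB holds 3 DSDs and each Continuation Type 0
 * IOCB holds 7 more, so tot_dsds = 12 needs 3 request entries
 * (1 command IOCB + one full continuation + one partial one for the
 * remaining 9 DSDs).  On the 64-bit path (2 DSDs in the command IOCB,
 * 5 per Continuation Type 1 IOCB) the same 12 DSDs also need 3 entries:
 * 1 + (10 / 5).
 */
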
/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
			   CONTINUE_A64_TYPE, &cont_pkt->entry_type);

	return (cont_pkt);
}

inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t guard = scsi_host_get_guard(cmd->device->host);

	/* We always use DIFF Bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (guard & SHOST_DIX_GUARD_IP)
			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
		else
			*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(cmd);
}

/*
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	struct dsd32 *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
	cur_dsd = cmd_pkt->dsd32;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd32(&cur_dsd, sg);
		avail_dsds--;
	}
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
241 * 242 * @sp: SRB command to process 243 * @cmd_pkt: Command type 3 IOCB 244 * @tot_dsds: Total number of segments to transfer 245 */ 246 void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt, 247 uint16_t tot_dsds) 248 { 249 uint16_t avail_dsds; 250 struct dsd64 *cur_dsd; 251 scsi_qla_host_t *vha; 252 struct scsi_cmnd *cmd; 253 struct scatterlist *sg; 254 int i; 255 256 cmd = GET_CMD_SP(sp); 257 258 /* Update entry type to indicate Command Type 3 IOCB */ 259 put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type); 260 261 /* No data transfer */ 262 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { 263 cmd_pkt->byte_count = cpu_to_le32(0); 264 return; 265 } 266 267 vha = sp->vha; 268 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 269 270 /* Two DSDs are available in the Command Type 3 IOCB */ 271 avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64); 272 cur_dsd = cmd_pkt->dsd64; 273 274 /* Load data segments */ 275 scsi_for_each_sg(cmd, sg, tot_dsds, i) { 276 cont_a64_entry_t *cont_pkt; 277 278 /* Allocate additional continuation packets? */ 279 if (avail_dsds == 0) { 280 /* 281 * Five DSDs are available in the Continuation 282 * Type 1 IOCB. 283 */ 284 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); 285 cur_dsd = cont_pkt->dsd; 286 avail_dsds = ARRAY_SIZE(cont_pkt->dsd); 287 } 288 289 append_dsd64(&cur_dsd, sg); 290 avail_dsds--; 291 } 292 } 293 294 /* 295 * Find the first handle that is not in use, starting from 296 * req->current_outstanding_cmd + 1. The caller must hold the lock that is 297 * associated with @req. 298 */ 299 uint32_t qla2xxx_get_next_handle(struct req_que *req) 300 { 301 uint32_t index, handle = req->current_outstanding_cmd; 302 303 for (index = 1; index < req->num_outstanding_cmds; index++) { 304 handle++; 305 if (handle == req->num_outstanding_cmds) 306 handle = 1; 307 if (!req->outstanding_cmds[handle]) 308 return handle; 309 } 310 311 return 0; 312 } 313 314 /** 315 * qla2x00_start_scsi() - Send a SCSI command to the ISP 316 * @sp: command to send to the ISP 317 * 318 * Returns non-zero if a failure occurred, else zero. 319 */ 320 int 321 qla2x00_start_scsi(srb_t *sp) 322 { 323 int nseg; 324 unsigned long flags; 325 scsi_qla_host_t *vha; 326 struct scsi_cmnd *cmd; 327 uint32_t *clr_ptr; 328 uint32_t handle; 329 cmd_entry_t *cmd_pkt; 330 uint16_t cnt; 331 uint16_t req_cnt; 332 uint16_t tot_dsds; 333 struct device_reg_2xxx __iomem *reg; 334 struct qla_hw_data *ha; 335 struct req_que *req; 336 struct rsp_que *rsp; 337 338 /* Setup device pointers. 
*/ 339 vha = sp->vha; 340 ha = vha->hw; 341 reg = &ha->iobase->isp; 342 cmd = GET_CMD_SP(sp); 343 req = ha->req_q_map[0]; 344 rsp = ha->rsp_q_map[0]; 345 /* So we know we haven't pci_map'ed anything yet */ 346 tot_dsds = 0; 347 348 /* Send marker if required */ 349 if (vha->marker_needed != 0) { 350 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) != 351 QLA_SUCCESS) { 352 return (QLA_FUNCTION_FAILED); 353 } 354 vha->marker_needed = 0; 355 } 356 357 /* Acquire ring specific lock */ 358 spin_lock_irqsave(&ha->hardware_lock, flags); 359 360 handle = qla2xxx_get_next_handle(req); 361 if (handle == 0) 362 goto queuing_error; 363 364 /* Map the sg table so we have an accurate count of sg entries needed */ 365 if (scsi_sg_count(cmd)) { 366 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), 367 scsi_sg_count(cmd), cmd->sc_data_direction); 368 if (unlikely(!nseg)) 369 goto queuing_error; 370 } else 371 nseg = 0; 372 373 tot_dsds = nseg; 374 375 /* Calculate the number of request entries needed. */ 376 req_cnt = ha->isp_ops->calc_req_entries(tot_dsds); 377 if (req->cnt < (req_cnt + 2)) { 378 cnt = rd_reg_word_relaxed(ISP_REQ_Q_OUT(ha, reg)); 379 if (req->ring_index < cnt) 380 req->cnt = cnt - req->ring_index; 381 else 382 req->cnt = req->length - 383 (req->ring_index - cnt); 384 /* If still no head room then bail out */ 385 if (req->cnt < (req_cnt + 2)) 386 goto queuing_error; 387 } 388 389 /* Build command packet */ 390 req->current_outstanding_cmd = handle; 391 req->outstanding_cmds[handle] = sp; 392 sp->handle = handle; 393 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 394 req->cnt -= req_cnt; 395 396 cmd_pkt = (cmd_entry_t *)req->ring_ptr; 397 cmd_pkt->handle = handle; 398 /* Zero out remaining portion of packet. */ 399 clr_ptr = (uint32_t *)cmd_pkt + 2; 400 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 401 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 402 403 /* Set target ID and LUN number*/ 404 SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id); 405 cmd_pkt->lun = cpu_to_le16(cmd->device->lun); 406 cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG); 407 408 /* Load SCSI command packet. */ 409 memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len); 410 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 411 412 /* Build IOCB segments */ 413 ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds); 414 415 /* Set total data segment count. */ 416 cmd_pkt->entry_count = (uint8_t)req_cnt; 417 wmb(); 418 419 /* Adjust ring index. */ 420 req->ring_index++; 421 if (req->ring_index == req->length) { 422 req->ring_index = 0; 423 req->ring_ptr = req->ring; 424 } else 425 req->ring_ptr++; 426 427 sp->flags |= SRB_DMA_VALID; 428 429 /* Set chip new ring index. */ 430 wrt_reg_word(ISP_REQ_Q_IN(ha, reg), req->ring_index); 431 rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */ 432 433 /* Manage unprocessed RIO/ZIO commands in response queue. 
 */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);

	if (IS_P3P_TYPE(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			wrt_reg_dword(req->req_q_in, req->ring_index);
		} else if (IS_QLA83XX(ha)) {
			wrt_reg_dword(req->req_q_in, req->ring_index);
			rd_reg_dword_relaxed(&ha->iobase->isp24.hccr);
		} else if (IS_QLAFX00(ha)) {
			wrt_reg_dword(&reg->ispfx00.req_q_in, req->ring_index);
			rd_reg_dword_relaxed(&reg->ispfx00.req_q_in);
			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
		} else if (IS_FWI2_CAPABLE(ha)) {
			wrt_reg_dword(&reg->isp24.req_q_in, req->ring_index);
			rd_reg_dword_relaxed(&reg->isp24.req_q_in);
		} else {
			wrt_reg_word(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @qpair: queue pair pointer
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24 = NULL;
	struct req_que *req = qpair->req;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = make_handle(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16((uint16_t)lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
	if (ha_locked) {
		if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	} else {
		if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	}
	vha->marker_needed = 0;

	return QLA_SUCCESS;
}

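/*
 * Typical marker usage in the start_scsi paths below (sketch only,
 * mirroring qla2x00_start_scsi() and qla24xx_start_scsi()):
 *
 *	if (vha->marker_needed != 0) {
 *		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
 *		    QLA_SUCCESS)
 *			return QLA_FUNCTION_FAILED;
 *		vha->marker_needed = 0;
 *	}
 *
 * qla2x00_issue_marker() above wraps the same call for contexts that may
 * or may not already hold the hardware lock.
 */
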
static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
    uint16_t tot_dsds)
{
	struct dsd64 *cur_dsd = NULL, *next_dsd;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;
	struct qla_qpair *qpair = sp->qpair;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 6 IOCB */
	put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return 0;
	}

	vha = sp->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		qpair->counters.output_bytes += scsi_bufflen(cmd);
		qpair->counters.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		qpair->counters.input_bytes += scsi_bufflen(cmd);
		qpair->counters.input_requests++;
	}

	cur_seg = scsi_sglist(cmd);
	ctx = sp->u.scmd.ct6_ctx;

	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			first_iocb = 0;
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
			    &cmd_pkt->fcp_dsd.address);
			cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
		} else {
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
			    &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(dsd_list_len);
			cur_dsd++;
		}
		cur_dsd = next_dsd;
		while (avail_dsds) {
			append_dsd64(&cur_dsd, cur_seg);
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
	return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}

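/*
 * Example (illustrative only): with QLA_DSDS_PER_IOCB entries per DSD
 * list, qla24xx_calc_dsd_lists() is a round-up division, e.g.
 * dsds = 2 * QLA_DSDS_PER_IOCB + 1 requires three lists.  Each list
 * occupies (QLA_DSDS_PER_IOCB + 1) * QLA_DSD_SIZE bytes in the DMA pool;
 * the extra slot holds either the chain to the next list or the null
 * terminator written by qla24xx_build_scsi_type_6_iocbs() above.
 */
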
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds, struct req_que *req)
{
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct qla_qpair *qpair = sp->qpair;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 7 IOCB */
	put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
		qpair->counters.output_bytes += scsi_bufflen(cmd);
		qpair->counters.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
		qpair->counters.input_bytes += scsi_bufflen(cmd);
		qpair->counters.input_requests++;
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->dsd;

	/* Load data segments */

	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}
}

struct fw_dif_context {
	__le32 ref_tag;
	__le16 app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from SCSI command
 *
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
			0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
825 */ 826 case SCSI_PROT_DIF_TYPE1: 827 pkt->ref_tag = cpu_to_le32((uint32_t) 828 (0xffffffff & scsi_get_lba(cmd))); 829 pkt->app_tag = cpu_to_le16(0); 830 pkt->app_tag_mask[0] = 0x0; 831 pkt->app_tag_mask[1] = 0x0; 832 833 if (!qla2x00_hba_err_chk_enabled(sp)) 834 break; 835 836 /* enable ALL bytes of the ref tag */ 837 pkt->ref_tag_mask[0] = 0xff; 838 pkt->ref_tag_mask[1] = 0xff; 839 pkt->ref_tag_mask[2] = 0xff; 840 pkt->ref_tag_mask[3] = 0xff; 841 break; 842 } 843 } 844 845 int 846 qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx, 847 uint32_t *partial) 848 { 849 struct scatterlist *sg; 850 uint32_t cumulative_partial, sg_len; 851 dma_addr_t sg_dma_addr; 852 853 if (sgx->num_bytes == sgx->tot_bytes) 854 return 0; 855 856 sg = sgx->cur_sg; 857 cumulative_partial = sgx->tot_partial; 858 859 sg_dma_addr = sg_dma_address(sg); 860 sg_len = sg_dma_len(sg); 861 862 sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed; 863 864 if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) { 865 sgx->dma_len = (blk_sz - cumulative_partial); 866 sgx->tot_partial = 0; 867 sgx->num_bytes += blk_sz; 868 *partial = 0; 869 } else { 870 sgx->dma_len = sg_len - sgx->bytes_consumed; 871 sgx->tot_partial += sgx->dma_len; 872 *partial = 1; 873 } 874 875 sgx->bytes_consumed += sgx->dma_len; 876 877 if (sg_len == sgx->bytes_consumed) { 878 sg = sg_next(sg); 879 sgx->num_sg++; 880 sgx->cur_sg = sg; 881 sgx->bytes_consumed = 0; 882 } 883 884 return 1; 885 } 886 887 int 888 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, 889 struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc) 890 { 891 void *next_dsd; 892 uint8_t avail_dsds = 0; 893 uint32_t dsd_list_len; 894 struct dsd_dma *dsd_ptr; 895 struct scatterlist *sg_prot; 896 struct dsd64 *cur_dsd = dsd; 897 uint16_t used_dsds = tot_dsds; 898 uint32_t prot_int; /* protection interval */ 899 uint32_t partial; 900 struct qla2_sgx sgx; 901 dma_addr_t sle_dma; 902 uint32_t sle_dma_len, tot_prot_dma_len = 0; 903 struct scsi_cmnd *cmd; 904 905 memset(&sgx, 0, sizeof(struct qla2_sgx)); 906 if (sp) { 907 cmd = GET_CMD_SP(sp); 908 prot_int = cmd->device->sector_size; 909 910 sgx.tot_bytes = scsi_bufflen(cmd); 911 sgx.cur_sg = scsi_sglist(cmd); 912 sgx.sp = sp; 913 914 sg_prot = scsi_prot_sglist(cmd); 915 } else if (tc) { 916 prot_int = tc->blk_sz; 917 sgx.tot_bytes = tc->bufflen; 918 sgx.cur_sg = tc->sg; 919 sg_prot = tc->prot_sg; 920 } else { 921 BUG(); 922 return 1; 923 } 924 925 while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) { 926 927 sle_dma = sgx.dma_addr; 928 sle_dma_len = sgx.dma_len; 929 alloc_and_fill: 930 /* Allocate additional continuation packets? */ 931 if (avail_dsds == 0) { 932 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 
933 QLA_DSDS_PER_IOCB : used_dsds; 934 dsd_list_len = (avail_dsds + 1) * 12; 935 used_dsds -= avail_dsds; 936 937 /* allocate tracking DS */ 938 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); 939 if (!dsd_ptr) 940 return 1; 941 942 /* allocate new list */ 943 dsd_ptr->dsd_addr = next_dsd = 944 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, 945 &dsd_ptr->dsd_list_dma); 946 947 if (!next_dsd) { 948 /* 949 * Need to cleanup only this dsd_ptr, rest 950 * will be done by sp_free_dma() 951 */ 952 kfree(dsd_ptr); 953 return 1; 954 } 955 956 if (sp) { 957 list_add_tail(&dsd_ptr->list, 958 &sp->u.scmd.crc_ctx->dsd_list); 959 960 sp->flags |= SRB_CRC_CTX_DSD_VALID; 961 } else { 962 list_add_tail(&dsd_ptr->list, 963 &(tc->ctx->dsd_list)); 964 *tc->ctx_dsd_alloced = 1; 965 } 966 967 968 /* add new list to cmd iocb or last list */ 969 put_unaligned_le64(dsd_ptr->dsd_list_dma, 970 &cur_dsd->address); 971 cur_dsd->length = cpu_to_le32(dsd_list_len); 972 cur_dsd = next_dsd; 973 } 974 put_unaligned_le64(sle_dma, &cur_dsd->address); 975 cur_dsd->length = cpu_to_le32(sle_dma_len); 976 cur_dsd++; 977 avail_dsds--; 978 979 if (partial == 0) { 980 /* Got a full protection interval */ 981 sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len; 982 sle_dma_len = 8; 983 984 tot_prot_dma_len += sle_dma_len; 985 if (tot_prot_dma_len == sg_dma_len(sg_prot)) { 986 tot_prot_dma_len = 0; 987 sg_prot = sg_next(sg_prot); 988 } 989 990 partial = 1; /* So as to not re-enter this block */ 991 goto alloc_and_fill; 992 } 993 } 994 /* Null termination */ 995 cur_dsd->address = 0; 996 cur_dsd->length = 0; 997 cur_dsd++; 998 return 0; 999 } 1000 1001 int 1002 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, 1003 struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc) 1004 { 1005 void *next_dsd; 1006 uint8_t avail_dsds = 0; 1007 uint32_t dsd_list_len; 1008 struct dsd_dma *dsd_ptr; 1009 struct scatterlist *sg, *sgl; 1010 struct dsd64 *cur_dsd = dsd; 1011 int i; 1012 uint16_t used_dsds = tot_dsds; 1013 struct scsi_cmnd *cmd; 1014 1015 if (sp) { 1016 cmd = GET_CMD_SP(sp); 1017 sgl = scsi_sglist(cmd); 1018 } else if (tc) { 1019 sgl = tc->sg; 1020 } else { 1021 BUG(); 1022 return 1; 1023 } 1024 1025 1026 for_each_sg(sgl, sg, tot_dsds, i) { 1027 /* Allocate additional continuation packets? */ 1028 if (avail_dsds == 0) { 1029 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 
1030 QLA_DSDS_PER_IOCB : used_dsds; 1031 dsd_list_len = (avail_dsds + 1) * 12; 1032 used_dsds -= avail_dsds; 1033 1034 /* allocate tracking DS */ 1035 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); 1036 if (!dsd_ptr) 1037 return 1; 1038 1039 /* allocate new list */ 1040 dsd_ptr->dsd_addr = next_dsd = 1041 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, 1042 &dsd_ptr->dsd_list_dma); 1043 1044 if (!next_dsd) { 1045 /* 1046 * Need to cleanup only this dsd_ptr, rest 1047 * will be done by sp_free_dma() 1048 */ 1049 kfree(dsd_ptr); 1050 return 1; 1051 } 1052 1053 if (sp) { 1054 list_add_tail(&dsd_ptr->list, 1055 &sp->u.scmd.crc_ctx->dsd_list); 1056 1057 sp->flags |= SRB_CRC_CTX_DSD_VALID; 1058 } else { 1059 list_add_tail(&dsd_ptr->list, 1060 &(tc->ctx->dsd_list)); 1061 *tc->ctx_dsd_alloced = 1; 1062 } 1063 1064 /* add new list to cmd iocb or last list */ 1065 put_unaligned_le64(dsd_ptr->dsd_list_dma, 1066 &cur_dsd->address); 1067 cur_dsd->length = cpu_to_le32(dsd_list_len); 1068 cur_dsd = next_dsd; 1069 } 1070 append_dsd64(&cur_dsd, sg); 1071 avail_dsds--; 1072 1073 } 1074 /* Null termination */ 1075 cur_dsd->address = 0; 1076 cur_dsd->length = 0; 1077 cur_dsd++; 1078 return 0; 1079 } 1080 1081 int 1082 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, 1083 struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc) 1084 { 1085 struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd; 1086 struct scatterlist *sg, *sgl; 1087 struct crc_context *difctx = NULL; 1088 struct scsi_qla_host *vha; 1089 uint dsd_list_len; 1090 uint avail_dsds = 0; 1091 uint used_dsds = tot_dsds; 1092 bool dif_local_dma_alloc = false; 1093 bool direction_to_device = false; 1094 int i; 1095 1096 if (sp) { 1097 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1098 1099 sgl = scsi_prot_sglist(cmd); 1100 vha = sp->vha; 1101 difctx = sp->u.scmd.crc_ctx; 1102 direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE; 1103 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021, 1104 "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n", 1105 __func__, cmd, difctx, sp); 1106 } else if (tc) { 1107 vha = tc->vha; 1108 sgl = tc->prot_sg; 1109 difctx = tc->ctx; 1110 direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE; 1111 } else { 1112 BUG(); 1113 return 1; 1114 } 1115 1116 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021, 1117 "%s: enter (write=%u)\n", __func__, direction_to_device); 1118 1119 /* if initiator doing write or target doing read */ 1120 if (direction_to_device) { 1121 for_each_sg(sgl, sg, tot_dsds, i) { 1122 u64 sle_phys = sg_phys(sg); 1123 1124 /* If SGE addr + len flips bits in upper 32-bits */ 1125 if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) { 1126 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022, 1127 "%s: page boundary crossing (phys=%llx len=%x)\n", 1128 __func__, sle_phys, sg->length); 1129 1130 if (difctx) { 1131 ha->dif_bundle_crossed_pages++; 1132 dif_local_dma_alloc = true; 1133 } else { 1134 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, 1135 vha, 0xe022, 1136 "%s: difctx pointer is NULL\n", 1137 __func__); 1138 } 1139 break; 1140 } 1141 } 1142 ha->dif_bundle_writes++; 1143 } else { 1144 ha->dif_bundle_reads++; 1145 } 1146 1147 if (ql2xdifbundlinginternalbuffers) 1148 dif_local_dma_alloc = direction_to_device; 1149 1150 if (dif_local_dma_alloc) { 1151 u32 track_difbundl_buf = 0; 1152 u32 ldma_sg_len = 0; 1153 u8 ldma_needed = 1; 1154 1155 difctx->no_dif_bundl = 0; 1156 difctx->dif_bundl_len = 0; 1157 1158 /* Track DSD buffers */ 1159 INIT_LIST_HEAD(&difctx->ldif_dsd_list); 1160 /* Track local DMA buffers 
*/ 1161 INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list); 1162 1163 for_each_sg(sgl, sg, tot_dsds, i) { 1164 u32 sglen = sg_dma_len(sg); 1165 1166 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023, 1167 "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n", 1168 __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len, 1169 difctx->dif_bundl_len, ldma_needed); 1170 1171 while (sglen) { 1172 u32 xfrlen = 0; 1173 1174 if (ldma_needed) { 1175 /* 1176 * Allocate list item to store 1177 * the DMA buffers 1178 */ 1179 dsd_ptr = kzalloc(sizeof(*dsd_ptr), 1180 GFP_ATOMIC); 1181 if (!dsd_ptr) { 1182 ql_dbg(ql_dbg_tgt, vha, 0xe024, 1183 "%s: failed alloc dsd_ptr\n", 1184 __func__); 1185 return 1; 1186 } 1187 ha->dif_bundle_kallocs++; 1188 1189 /* allocate dma buffer */ 1190 dsd_ptr->dsd_addr = dma_pool_alloc 1191 (ha->dif_bundl_pool, GFP_ATOMIC, 1192 &dsd_ptr->dsd_list_dma); 1193 if (!dsd_ptr->dsd_addr) { 1194 ql_dbg(ql_dbg_tgt, vha, 0xe024, 1195 "%s: failed alloc ->dsd_ptr\n", 1196 __func__); 1197 /* 1198 * need to cleanup only this 1199 * dsd_ptr rest will be done 1200 * by sp_free_dma() 1201 */ 1202 kfree(dsd_ptr); 1203 ha->dif_bundle_kallocs--; 1204 return 1; 1205 } 1206 ha->dif_bundle_dma_allocs++; 1207 ldma_needed = 0; 1208 difctx->no_dif_bundl++; 1209 list_add_tail(&dsd_ptr->list, 1210 &difctx->ldif_dma_hndl_list); 1211 } 1212 1213 /* xfrlen is min of dma pool size and sglen */ 1214 xfrlen = (sglen > 1215 (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ? 1216 DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len : 1217 sglen; 1218 1219 /* replace with local allocated dma buffer */ 1220 sg_pcopy_to_buffer(sgl, sg_nents(sgl), 1221 dsd_ptr->dsd_addr + ldma_sg_len, xfrlen, 1222 difctx->dif_bundl_len); 1223 difctx->dif_bundl_len += xfrlen; 1224 sglen -= xfrlen; 1225 ldma_sg_len += xfrlen; 1226 if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE || 1227 sg_is_last(sg)) { 1228 ldma_needed = 1; 1229 ldma_sg_len = 0; 1230 } 1231 } 1232 } 1233 1234 track_difbundl_buf = used_dsds = difctx->no_dif_bundl; 1235 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025, 1236 "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n", 1237 difctx->dif_bundl_len, difctx->no_dif_bundl, 1238 track_difbundl_buf); 1239 1240 if (sp) 1241 sp->flags |= SRB_DIF_BUNDL_DMA_VALID; 1242 else 1243 tc->prot_flags = DIF_BUNDL_DMA_VALID; 1244 1245 list_for_each_entry_safe(dif_dsd, nxt_dsd, 1246 &difctx->ldif_dma_hndl_list, list) { 1247 u32 sglen = (difctx->dif_bundl_len > 1248 DIF_BUNDLING_DMA_POOL_SIZE) ? 1249 DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len; 1250 1251 BUG_ON(track_difbundl_buf == 0); 1252 1253 /* Allocate additional continuation packets? */ 1254 if (avail_dsds == 0) { 1255 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 1256 0xe024, 1257 "%s: adding continuation iocb's\n", 1258 __func__); 1259 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 
1260 QLA_DSDS_PER_IOCB : used_dsds; 1261 dsd_list_len = (avail_dsds + 1) * 12; 1262 used_dsds -= avail_dsds; 1263 1264 /* allocate tracking DS */ 1265 dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC); 1266 if (!dsd_ptr) { 1267 ql_dbg(ql_dbg_tgt, vha, 0xe026, 1268 "%s: failed alloc dsd_ptr\n", 1269 __func__); 1270 return 1; 1271 } 1272 ha->dif_bundle_kallocs++; 1273 1274 difctx->no_ldif_dsd++; 1275 /* allocate new list */ 1276 dsd_ptr->dsd_addr = 1277 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, 1278 &dsd_ptr->dsd_list_dma); 1279 if (!dsd_ptr->dsd_addr) { 1280 ql_dbg(ql_dbg_tgt, vha, 0xe026, 1281 "%s: failed alloc ->dsd_addr\n", 1282 __func__); 1283 /* 1284 * need to cleanup only this dsd_ptr 1285 * rest will be done by sp_free_dma() 1286 */ 1287 kfree(dsd_ptr); 1288 ha->dif_bundle_kallocs--; 1289 return 1; 1290 } 1291 ha->dif_bundle_dma_allocs++; 1292 1293 if (sp) { 1294 list_add_tail(&dsd_ptr->list, 1295 &difctx->ldif_dsd_list); 1296 sp->flags |= SRB_CRC_CTX_DSD_VALID; 1297 } else { 1298 list_add_tail(&dsd_ptr->list, 1299 &difctx->ldif_dsd_list); 1300 tc->ctx_dsd_alloced = 1; 1301 } 1302 1303 /* add new list to cmd iocb or last list */ 1304 put_unaligned_le64(dsd_ptr->dsd_list_dma, 1305 &cur_dsd->address); 1306 cur_dsd->length = cpu_to_le32(dsd_list_len); 1307 cur_dsd = dsd_ptr->dsd_addr; 1308 } 1309 put_unaligned_le64(dif_dsd->dsd_list_dma, 1310 &cur_dsd->address); 1311 cur_dsd->length = cpu_to_le32(sglen); 1312 cur_dsd++; 1313 avail_dsds--; 1314 difctx->dif_bundl_len -= sglen; 1315 track_difbundl_buf--; 1316 } 1317 1318 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026, 1319 "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__, 1320 difctx->no_ldif_dsd, difctx->no_dif_bundl); 1321 } else { 1322 for_each_sg(sgl, sg, tot_dsds, i) { 1323 /* Allocate additional continuation packets? */ 1324 if (avail_dsds == 0) { 1325 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 1326 QLA_DSDS_PER_IOCB : used_dsds; 1327 dsd_list_len = (avail_dsds + 1) * 12; 1328 used_dsds -= avail_dsds; 1329 1330 /* allocate tracking DS */ 1331 dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC); 1332 if (!dsd_ptr) { 1333 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, 1334 vha, 0xe027, 1335 "%s: failed alloc dsd_dma...\n", 1336 __func__); 1337 return 1; 1338 } 1339 1340 /* allocate new list */ 1341 dsd_ptr->dsd_addr = 1342 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, 1343 &dsd_ptr->dsd_list_dma); 1344 if (!dsd_ptr->dsd_addr) { 1345 /* need to cleanup only this dsd_ptr */ 1346 /* rest will be done by sp_free_dma() */ 1347 kfree(dsd_ptr); 1348 return 1; 1349 } 1350 1351 if (sp) { 1352 list_add_tail(&dsd_ptr->list, 1353 &difctx->dsd_list); 1354 sp->flags |= SRB_CRC_CTX_DSD_VALID; 1355 } else { 1356 list_add_tail(&dsd_ptr->list, 1357 &difctx->dsd_list); 1358 tc->ctx_dsd_alloced = 1; 1359 } 1360 1361 /* add new list to cmd iocb or last list */ 1362 put_unaligned_le64(dsd_ptr->dsd_list_dma, 1363 &cur_dsd->address); 1364 cur_dsd->length = cpu_to_le32(dsd_list_len); 1365 cur_dsd = dsd_ptr->dsd_addr; 1366 } 1367 append_dsd64(&cur_dsd, sg); 1368 avail_dsds--; 1369 } 1370 } 1371 /* Null termination */ 1372 cur_dsd->address = 0; 1373 cur_dsd->length = 0; 1374 cur_dsd++; 1375 return 0; 1376 } 1377 1378 /** 1379 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command 1380 * Type 6 IOCB types. 
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of segments with protection information
 * @fw_prot_opts: Protection options to be passed to firmware
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	struct dsd64 *cur_dsd;
	__be32 *fcp_dl;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t total_bytes = 0;
	uint32_t data_bytes;
	uint32_t dif_bytes;
	uint8_t bundling = 1;
	uint16_t blk_size;
	struct crc_context *crc_ctx_pkt = NULL;
	struct qla_hw_data *ha;
	uint8_t additional_fcpcdb_len;
	uint16_t fcp_cmnd_len;
	struct fcp_cmnd *fcp_cmnd;
	dma_addr_t crc_ctx_dma;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type);

	vha = sp->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->vha->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->u.scmd.crc_ctx =
	    dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
	cmd_pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

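	/*
	 * Worked example (illustrative only): struct fcp_cmnd carries an
	 * 8-byte LUN plus 4 bytes of task/flag fields before the CDB and a
	 * 4-byte FCP_DL after it, hence the 12 + CDB length + 4 above.  A
	 * standard 16-byte CDB therefore yields fcp_cmnd_len = 32, while a
	 * 32-byte CDB yields 48 with additional_fcpcdb_len = 16; fcp_dl is
	 * written at cdb + 16 + additional_fcpcdb_len further down.
	 */
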
	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF,
	    &cmd_pkt->fcp_cmnd_dseg_address);
	fcp_cmnd->task_management = 0;
	fcp_cmnd->task_attribute = TSK_SIMPLE;

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	default:
		BUG();
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
			SCSI_PROT_DIF_TYPE2))
			fw_prot_opts |= BIT_10;
		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
		    SCSI_PROT_DIF_TYPE3)
			fw_prot_opts |= BIT_11;
	}

	if (!bundling) {
		cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
		    tot_prot_dsds);
		cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (__be32 *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
		    cur_dsd, tot_dsds, NULL))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
	    (tot_dsds - tot_prot_dsds), NULL))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
		    tot_prot_dsds, NULL))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
1590 */ 1591 int 1592 qla24xx_start_scsi(srb_t *sp) 1593 { 1594 int nseg; 1595 unsigned long flags; 1596 uint32_t *clr_ptr; 1597 uint32_t handle; 1598 struct cmd_type_7 *cmd_pkt; 1599 uint16_t cnt; 1600 uint16_t req_cnt; 1601 uint16_t tot_dsds; 1602 struct req_que *req = NULL; 1603 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1604 struct scsi_qla_host *vha = sp->vha; 1605 struct qla_hw_data *ha = vha->hw; 1606 1607 /* Setup device pointers. */ 1608 req = vha->req; 1609 1610 /* So we know we haven't pci_map'ed anything yet */ 1611 tot_dsds = 0; 1612 1613 /* Send marker if required */ 1614 if (vha->marker_needed != 0) { 1615 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) != 1616 QLA_SUCCESS) 1617 return QLA_FUNCTION_FAILED; 1618 vha->marker_needed = 0; 1619 } 1620 1621 /* Acquire ring specific lock */ 1622 spin_lock_irqsave(&ha->hardware_lock, flags); 1623 1624 handle = qla2xxx_get_next_handle(req); 1625 if (handle == 0) 1626 goto queuing_error; 1627 1628 /* Map the sg table so we have an accurate count of sg entries needed */ 1629 if (scsi_sg_count(cmd)) { 1630 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), 1631 scsi_sg_count(cmd), cmd->sc_data_direction); 1632 if (unlikely(!nseg)) 1633 goto queuing_error; 1634 } else 1635 nseg = 0; 1636 1637 tot_dsds = nseg; 1638 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); 1639 1640 sp->iores.res_type = RESOURCE_INI; 1641 sp->iores.iocb_cnt = req_cnt; 1642 if (qla_get_iocbs(sp->qpair, &sp->iores)) 1643 goto queuing_error; 1644 1645 if (req->cnt < (req_cnt + 2)) { 1646 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : 1647 rd_reg_dword_relaxed(req->req_q_out); 1648 if (req->ring_index < cnt) 1649 req->cnt = cnt - req->ring_index; 1650 else 1651 req->cnt = req->length - 1652 (req->ring_index - cnt); 1653 if (req->cnt < (req_cnt + 2)) 1654 goto queuing_error; 1655 } 1656 1657 /* Build command packet. */ 1658 req->current_outstanding_cmd = handle; 1659 req->outstanding_cmds[handle] = sp; 1660 sp->handle = handle; 1661 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 1662 req->cnt -= req_cnt; 1663 1664 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; 1665 cmd_pkt->handle = make_handle(req->id, handle); 1666 1667 /* Zero out remaining portion of packet. */ 1668 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ 1669 clr_ptr = (uint32_t *)cmd_pkt + 2; 1670 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 1671 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 1672 1673 /* Set NPORT-ID and LUN number*/ 1674 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 1675 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 1676 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 1677 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 1678 cmd_pkt->vp_index = sp->vha->vp_idx; 1679 1680 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 1681 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 1682 1683 cmd_pkt->task = TSK_SIMPLE; 1684 1685 /* Load SCSI command packet. */ 1686 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); 1687 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); 1688 1689 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 1690 1691 /* Build IOCB segments */ 1692 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req); 1693 1694 /* Set total data segment count. */ 1695 cmd_pkt->entry_count = (uint8_t)req_cnt; 1696 wmb(); 1697 /* Adjust ring index. 
*/ 1698 req->ring_index++; 1699 if (req->ring_index == req->length) { 1700 req->ring_index = 0; 1701 req->ring_ptr = req->ring; 1702 } else 1703 req->ring_ptr++; 1704 1705 sp->flags |= SRB_DMA_VALID; 1706 1707 /* Set chip new ring index. */ 1708 wrt_reg_dword(req->req_q_in, req->ring_index); 1709 1710 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1711 return QLA_SUCCESS; 1712 1713 queuing_error: 1714 if (tot_dsds) 1715 scsi_dma_unmap(cmd); 1716 1717 qla_put_iocbs(sp->qpair, &sp->iores); 1718 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1719 1720 return QLA_FUNCTION_FAILED; 1721 } 1722 1723 /** 1724 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP 1725 * @sp: command to send to the ISP 1726 * 1727 * Returns non-zero if a failure occurred, else zero. 1728 */ 1729 int 1730 qla24xx_dif_start_scsi(srb_t *sp) 1731 { 1732 int nseg; 1733 unsigned long flags; 1734 uint32_t *clr_ptr; 1735 uint32_t handle; 1736 uint16_t cnt; 1737 uint16_t req_cnt = 0; 1738 uint16_t tot_dsds; 1739 uint16_t tot_prot_dsds; 1740 uint16_t fw_prot_opts = 0; 1741 struct req_que *req = NULL; 1742 struct rsp_que *rsp = NULL; 1743 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1744 struct scsi_qla_host *vha = sp->vha; 1745 struct qla_hw_data *ha = vha->hw; 1746 struct cmd_type_crc_2 *cmd_pkt; 1747 uint32_t status = 0; 1748 1749 #define QDSS_GOT_Q_SPACE BIT_0 1750 1751 /* Only process protection or >16 cdb in this routine */ 1752 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) { 1753 if (cmd->cmd_len <= 16) 1754 return qla24xx_start_scsi(sp); 1755 } 1756 1757 /* Setup device pointers. */ 1758 req = vha->req; 1759 rsp = req->rsp; 1760 1761 /* So we know we haven't pci_map'ed anything yet */ 1762 tot_dsds = 0; 1763 1764 /* Send marker if required */ 1765 if (vha->marker_needed != 0) { 1766 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) != 1767 QLA_SUCCESS) 1768 return QLA_FUNCTION_FAILED; 1769 vha->marker_needed = 0; 1770 } 1771 1772 /* Acquire ring specific lock */ 1773 spin_lock_irqsave(&ha->hardware_lock, flags); 1774 1775 handle = qla2xxx_get_next_handle(req); 1776 if (handle == 0) 1777 goto queuing_error; 1778 1779 /* Compute number of required data segments */ 1780 /* Map the sg table so we have an accurate count of sg entries needed */ 1781 if (scsi_sg_count(cmd)) { 1782 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), 1783 scsi_sg_count(cmd), cmd->sc_data_direction); 1784 if (unlikely(!nseg)) 1785 goto queuing_error; 1786 else 1787 sp->flags |= SRB_DMA_VALID; 1788 1789 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || 1790 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { 1791 struct qla2_sgx sgx; 1792 uint32_t partial; 1793 1794 memset(&sgx, 0, sizeof(struct qla2_sgx)); 1795 sgx.tot_bytes = scsi_bufflen(cmd); 1796 sgx.cur_sg = scsi_sglist(cmd); 1797 sgx.sp = sp; 1798 1799 nseg = 0; 1800 while (qla24xx_get_one_block_sg( 1801 cmd->device->sector_size, &sgx, &partial)) 1802 nseg++; 1803 } 1804 } else 1805 nseg = 0; 1806 1807 /* number of required data segments */ 1808 tot_dsds = nseg; 1809 1810 /* Compute number of required protection segments */ 1811 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) { 1812 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), 1813 scsi_prot_sg_count(cmd), cmd->sc_data_direction); 1814 if (unlikely(!nseg)) 1815 goto queuing_error; 1816 else 1817 sp->flags |= SRB_CRC_PROT_DMA_VALID; 1818 1819 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || 1820 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { 1821 nseg = scsi_bufflen(cmd) / 
cmd->device->sector_size; 1822 } 1823 } else { 1824 nseg = 0; 1825 } 1826 1827 req_cnt = 1; 1828 /* Total Data and protection sg segment(s) */ 1829 tot_prot_dsds = nseg; 1830 tot_dsds += nseg; 1831 1832 sp->iores.res_type = RESOURCE_INI; 1833 sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds); 1834 if (qla_get_iocbs(sp->qpair, &sp->iores)) 1835 goto queuing_error; 1836 1837 if (req->cnt < (req_cnt + 2)) { 1838 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : 1839 rd_reg_dword_relaxed(req->req_q_out); 1840 if (req->ring_index < cnt) 1841 req->cnt = cnt - req->ring_index; 1842 else 1843 req->cnt = req->length - 1844 (req->ring_index - cnt); 1845 if (req->cnt < (req_cnt + 2)) 1846 goto queuing_error; 1847 } 1848 1849 status |= QDSS_GOT_Q_SPACE; 1850 1851 /* Build header part of command packet (excluding the OPCODE). */ 1852 req->current_outstanding_cmd = handle; 1853 req->outstanding_cmds[handle] = sp; 1854 sp->handle = handle; 1855 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 1856 req->cnt -= req_cnt; 1857 1858 /* Fill-in common area */ 1859 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr; 1860 cmd_pkt->handle = make_handle(req->id, handle); 1861 1862 clr_ptr = (uint32_t *)cmd_pkt + 2; 1863 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 1864 1865 /* Set NPORT-ID and LUN number*/ 1866 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 1867 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 1868 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 1869 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 1870 1871 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 1872 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 1873 1874 /* Total Data and protection segment(s) */ 1875 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 1876 1877 /* Build IOCB segments and adjust for data protection segments */ 1878 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *) 1879 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) != 1880 QLA_SUCCESS) 1881 goto queuing_error; 1882 1883 cmd_pkt->entry_count = (uint8_t)req_cnt; 1884 /* Specify response queue number where completion should happen */ 1885 cmd_pkt->entry_status = (uint8_t) rsp->id; 1886 cmd_pkt->timeout = cpu_to_le16(0); 1887 wmb(); 1888 1889 /* Adjust ring index. */ 1890 req->ring_index++; 1891 if (req->ring_index == req->length) { 1892 req->ring_index = 0; 1893 req->ring_ptr = req->ring; 1894 } else 1895 req->ring_ptr++; 1896 1897 /* Set chip new ring index. */ 1898 wrt_reg_dword(req->req_q_in, req->ring_index); 1899 1900 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1901 1902 return QLA_SUCCESS; 1903 1904 queuing_error: 1905 if (status & QDSS_GOT_Q_SPACE) { 1906 req->outstanding_cmds[handle] = NULL; 1907 req->cnt += req_cnt; 1908 } 1909 /* Cleanup will be performed by the caller (queuecommand) */ 1910 1911 qla_put_iocbs(sp->qpair, &sp->iores); 1912 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1913 return QLA_FUNCTION_FAILED; 1914 } 1915 1916 /** 1917 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP 1918 * @sp: command to send to the ISP 1919 * 1920 * Returns non-zero if a failure occurred, else zero. 
1921 */ 1922 static int 1923 qla2xxx_start_scsi_mq(srb_t *sp) 1924 { 1925 int nseg; 1926 unsigned long flags; 1927 uint32_t *clr_ptr; 1928 uint32_t handle; 1929 struct cmd_type_7 *cmd_pkt; 1930 uint16_t cnt; 1931 uint16_t req_cnt; 1932 uint16_t tot_dsds; 1933 struct req_que *req = NULL; 1934 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1935 struct scsi_qla_host *vha = sp->fcport->vha; 1936 struct qla_hw_data *ha = vha->hw; 1937 struct qla_qpair *qpair = sp->qpair; 1938 1939 /* Acquire qpair specific lock */ 1940 spin_lock_irqsave(&qpair->qp_lock, flags); 1941 1942 /* Setup qpair pointers */ 1943 req = qpair->req; 1944 1945 /* So we know we haven't pci_map'ed anything yet */ 1946 tot_dsds = 0; 1947 1948 /* Send marker if required */ 1949 if (vha->marker_needed != 0) { 1950 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) != 1951 QLA_SUCCESS) { 1952 spin_unlock_irqrestore(&qpair->qp_lock, flags); 1953 return QLA_FUNCTION_FAILED; 1954 } 1955 vha->marker_needed = 0; 1956 } 1957 1958 handle = qla2xxx_get_next_handle(req); 1959 if (handle == 0) 1960 goto queuing_error; 1961 1962 /* Map the sg table so we have an accurate count of sg entries needed */ 1963 if (scsi_sg_count(cmd)) { 1964 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), 1965 scsi_sg_count(cmd), cmd->sc_data_direction); 1966 if (unlikely(!nseg)) 1967 goto queuing_error; 1968 } else 1969 nseg = 0; 1970 1971 tot_dsds = nseg; 1972 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); 1973 1974 sp->iores.res_type = RESOURCE_INI; 1975 sp->iores.iocb_cnt = req_cnt; 1976 if (qla_get_iocbs(sp->qpair, &sp->iores)) 1977 goto queuing_error; 1978 1979 if (req->cnt < (req_cnt + 2)) { 1980 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : 1981 rd_reg_dword_relaxed(req->req_q_out); 1982 if (req->ring_index < cnt) 1983 req->cnt = cnt - req->ring_index; 1984 else 1985 req->cnt = req->length - 1986 (req->ring_index - cnt); 1987 if (req->cnt < (req_cnt + 2)) 1988 goto queuing_error; 1989 } 1990 1991 /* Build command packet. */ 1992 req->current_outstanding_cmd = handle; 1993 req->outstanding_cmds[handle] = sp; 1994 sp->handle = handle; 1995 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 1996 req->cnt -= req_cnt; 1997 1998 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; 1999 cmd_pkt->handle = make_handle(req->id, handle); 2000 2001 /* Zero out remaining portion of packet. */ 2002 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ 2003 clr_ptr = (uint32_t *)cmd_pkt + 2; 2004 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 2005 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 2006 2007 /* Set NPORT-ID and LUN number*/ 2008 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2009 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 2010 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 2011 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 2012 cmd_pkt->vp_index = sp->fcport->vha->vp_idx; 2013 2014 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 2015 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 2016 2017 cmd_pkt->task = TSK_SIMPLE; 2018 2019 /* Load SCSI command packet. */ 2020 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); 2021 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); 2022 2023 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 2024 2025 /* Build IOCB segments */ 2026 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req); 2027 2028 /* Set total data segment count. */ 2029 cmd_pkt->entry_count = (uint8_t)req_cnt; 2030 wmb(); 2031 /* Adjust ring index. 
*/ 2032 req->ring_index++; 2033 if (req->ring_index == req->length) { 2034 req->ring_index = 0; 2035 req->ring_ptr = req->ring; 2036 } else 2037 req->ring_ptr++; 2038 2039 sp->flags |= SRB_DMA_VALID; 2040 2041 /* Set chip new ring index. */ 2042 wrt_reg_dword(req->req_q_in, req->ring_index); 2043 2044 spin_unlock_irqrestore(&qpair->qp_lock, flags); 2045 return QLA_SUCCESS; 2046 2047 queuing_error: 2048 if (tot_dsds) 2049 scsi_dma_unmap(cmd); 2050 2051 qla_put_iocbs(sp->qpair, &sp->iores); 2052 spin_unlock_irqrestore(&qpair->qp_lock, flags); 2053 2054 return QLA_FUNCTION_FAILED; 2055 } 2056 2057 2058 /** 2059 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP 2060 * @sp: command to send to the ISP 2061 * 2062 * Returns non-zero if a failure occurred, else zero. 2063 */ 2064 int 2065 qla2xxx_dif_start_scsi_mq(srb_t *sp) 2066 { 2067 int nseg; 2068 unsigned long flags; 2069 uint32_t *clr_ptr; 2070 uint32_t handle; 2071 uint16_t cnt; 2072 uint16_t req_cnt = 0; 2073 uint16_t tot_dsds; 2074 uint16_t tot_prot_dsds; 2075 uint16_t fw_prot_opts = 0; 2076 struct req_que *req = NULL; 2077 struct rsp_que *rsp = NULL; 2078 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 2079 struct scsi_qla_host *vha = sp->fcport->vha; 2080 struct qla_hw_data *ha = vha->hw; 2081 struct cmd_type_crc_2 *cmd_pkt; 2082 uint32_t status = 0; 2083 struct qla_qpair *qpair = sp->qpair; 2084 2085 #define QDSS_GOT_Q_SPACE BIT_0 2086 2087 /* Check for host side state */ 2088 if (!qpair->online) { 2089 cmd->result = DID_NO_CONNECT << 16; 2090 return QLA_INTERFACE_ERROR; 2091 } 2092 2093 if (!qpair->difdix_supported && 2094 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { 2095 cmd->result = DID_NO_CONNECT << 16; 2096 return QLA_INTERFACE_ERROR; 2097 } 2098 2099 /* Only process protection or >16 cdb in this routine */ 2100 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) { 2101 if (cmd->cmd_len <= 16) 2102 return qla2xxx_start_scsi_mq(sp); 2103 } 2104 2105 spin_lock_irqsave(&qpair->qp_lock, flags); 2106 2107 /* Setup qpair pointers */ 2108 rsp = qpair->rsp; 2109 req = qpair->req; 2110 2111 /* So we know we haven't pci_map'ed anything yet */ 2112 tot_dsds = 0; 2113 2114 /* Send marker if required */ 2115 if (vha->marker_needed != 0) { 2116 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) != 2117 QLA_SUCCESS) { 2118 spin_unlock_irqrestore(&qpair->qp_lock, flags); 2119 return QLA_FUNCTION_FAILED; 2120 } 2121 vha->marker_needed = 0; 2122 } 2123 2124 handle = qla2xxx_get_next_handle(req); 2125 if (handle == 0) 2126 goto queuing_error; 2127 2128 /* Compute number of required data segments */ 2129 /* Map the sg table so we have an accurate count of sg entries needed */ 2130 if (scsi_sg_count(cmd)) { 2131 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), 2132 scsi_sg_count(cmd), cmd->sc_data_direction); 2133 if (unlikely(!nseg)) 2134 goto queuing_error; 2135 else 2136 sp->flags |= SRB_DMA_VALID; 2137 2138 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || 2139 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { 2140 struct qla2_sgx sgx; 2141 uint32_t partial; 2142 2143 memset(&sgx, 0, sizeof(struct qla2_sgx)); 2144 sgx.tot_bytes = scsi_bufflen(cmd); 2145 sgx.cur_sg = scsi_sglist(cmd); 2146 sgx.sp = sp; 2147 2148 nseg = 0; 2149 while (qla24xx_get_one_block_sg( 2150 cmd->device->sector_size, &sgx, &partial)) 2151 nseg++; 2152 } 2153 } else 2154 nseg = 0; 2155 2156 /* number of required data segments */ 2157 tot_dsds = nseg; 2158 2159 /* Compute number of required protection segments */ 2160 if (qla24xx_configure_prot_mode(sp, 
&fw_prot_opts)) { 2161 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), 2162 scsi_prot_sg_count(cmd), cmd->sc_data_direction); 2163 if (unlikely(!nseg)) 2164 goto queuing_error; 2165 else 2166 sp->flags |= SRB_CRC_PROT_DMA_VALID; 2167 2168 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || 2169 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { 2170 nseg = scsi_bufflen(cmd) / cmd->device->sector_size; 2171 } 2172 } else { 2173 nseg = 0; 2174 } 2175 2176 req_cnt = 1; 2177 /* Total Data and protection sg segment(s) */ 2178 tot_prot_dsds = nseg; 2179 tot_dsds += nseg; 2180 2181 sp->iores.res_type = RESOURCE_INI; 2182 sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds); 2183 if (qla_get_iocbs(sp->qpair, &sp->iores)) 2184 goto queuing_error; 2185 2186 if (req->cnt < (req_cnt + 2)) { 2187 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : 2188 rd_reg_dword_relaxed(req->req_q_out); 2189 if (req->ring_index < cnt) 2190 req->cnt = cnt - req->ring_index; 2191 else 2192 req->cnt = req->length - 2193 (req->ring_index - cnt); 2194 if (req->cnt < (req_cnt + 2)) 2195 goto queuing_error; 2196 } 2197 2198 status |= QDSS_GOT_Q_SPACE; 2199 2200 /* Build header part of command packet (excluding the OPCODE). */ 2201 req->current_outstanding_cmd = handle; 2202 req->outstanding_cmds[handle] = sp; 2203 sp->handle = handle; 2204 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 2205 req->cnt -= req_cnt; 2206 2207 /* Fill-in common area */ 2208 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr; 2209 cmd_pkt->handle = make_handle(req->id, handle); 2210 2211 clr_ptr = (uint32_t *)cmd_pkt + 2; 2212 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 2213 2214 /* Set NPORT-ID and LUN number*/ 2215 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2216 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 2217 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 2218 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 2219 2220 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 2221 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 2222 2223 /* Total Data and protection segment(s) */ 2224 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 2225 2226 /* Build IOCB segments and adjust for data protection segments */ 2227 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *) 2228 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) != 2229 QLA_SUCCESS) 2230 goto queuing_error; 2231 2232 cmd_pkt->entry_count = (uint8_t)req_cnt; 2233 cmd_pkt->timeout = cpu_to_le16(0); 2234 wmb(); 2235 2236 /* Adjust ring index. */ 2237 req->ring_index++; 2238 if (req->ring_index == req->length) { 2239 req->ring_index = 0; 2240 req->ring_ptr = req->ring; 2241 } else 2242 req->ring_ptr++; 2243 2244 /* Set chip new ring index. */ 2245 wrt_reg_dword(req->req_q_in, req->ring_index); 2246 2247 /* Manage unprocessed RIO/ZIO commands in response queue. */ 2248 if (vha->flags.process_response_queue && 2249 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 2250 qla24xx_process_response_queue(vha, rsp); 2251 2252 spin_unlock_irqrestore(&qpair->qp_lock, flags); 2253 2254 return QLA_SUCCESS; 2255 2256 queuing_error: 2257 if (status & QDSS_GOT_Q_SPACE) { 2258 req->outstanding_cmds[handle] = NULL; 2259 req->cnt += req_cnt; 2260 } 2261 /* Cleanup will be performed by the caller (queuecommand) */ 2262 2263 qla_put_iocbs(sp->qpair, &sp->iores); 2264 spin_unlock_irqrestore(&qpair->qp_lock, flags); 2265 return QLA_FUNCTION_FAILED; 2266 } 2267 2268 /* Generic Control-SRB manipulation functions. 
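These helpers reserve space on a request queue and build the non-SCSI IOCB types (login/logout, ELS, CT pass-through, task management, NVMe LS, aborts, etc.) that are dispatched through qla2x00_start_sp().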
*/ 2269 2270 /* hardware_lock assumed to be held. */ 2271 2272 void * 2273 __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp) 2274 { 2275 scsi_qla_host_t *vha = qpair->vha; 2276 struct qla_hw_data *ha = vha->hw; 2277 struct req_que *req = qpair->req; 2278 device_reg_t *reg = ISP_QUE_REG(ha, req->id); 2279 uint32_t handle; 2280 request_t *pkt; 2281 uint16_t cnt, req_cnt; 2282 2283 pkt = NULL; 2284 req_cnt = 1; 2285 handle = 0; 2286 2287 if (sp && (sp->type != SRB_SCSI_CMD)) { 2288 /* Adjust entry-counts as needed. */ 2289 req_cnt = sp->iocbs; 2290 } 2291 2292 /* Check for room on request queue. */ 2293 if (req->cnt < req_cnt + 2) { 2294 if (qpair->use_shadow_reg) 2295 cnt = *req->out_ptr; 2296 else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || 2297 IS_QLA28XX(ha)) 2298 cnt = rd_reg_dword(&reg->isp25mq.req_q_out); 2299 else if (IS_P3P_TYPE(ha)) 2300 cnt = rd_reg_dword(reg->isp82.req_q_out); 2301 else if (IS_FWI2_CAPABLE(ha)) 2302 cnt = rd_reg_dword(&reg->isp24.req_q_out); 2303 else if (IS_QLAFX00(ha)) 2304 cnt = rd_reg_dword(&reg->ispfx00.req_q_out); 2305 else 2306 cnt = qla2x00_debounce_register( 2307 ISP_REQ_Q_OUT(ha, &reg->isp)); 2308 2309 if (req->ring_index < cnt) 2310 req->cnt = cnt - req->ring_index; 2311 else 2312 req->cnt = req->length - 2313 (req->ring_index - cnt); 2314 } 2315 if (req->cnt < req_cnt + 2) 2316 goto queuing_error; 2317 2318 if (sp) { 2319 handle = qla2xxx_get_next_handle(req); 2320 if (handle == 0) { 2321 ql_log(ql_log_warn, vha, 0x700b, 2322 "No room on outstanding cmd array.\n"); 2323 goto queuing_error; 2324 } 2325 2326 /* Prep command array. */ 2327 req->current_outstanding_cmd = handle; 2328 req->outstanding_cmds[handle] = sp; 2329 sp->handle = handle; 2330 } 2331 2332 /* Prep packet */ 2333 req->cnt -= req_cnt; 2334 pkt = req->ring_ptr; 2335 memset(pkt, 0, REQUEST_ENTRY_SIZE); 2336 if (IS_QLAFX00(ha)) { 2337 wrt_reg_byte((u8 __force __iomem *)&pkt->entry_count, req_cnt); 2338 wrt_reg_dword((__le32 __force __iomem *)&pkt->handle, handle); 2339 } else { 2340 pkt->entry_count = req_cnt; 2341 pkt->handle = handle; 2342 } 2343 2344 return pkt; 2345 2346 queuing_error: 2347 qpair->tgt_counters.num_alloc_iocb_failed++; 2348 return pkt; 2349 } 2350 2351 void * 2352 qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp) 2353 { 2354 scsi_qla_host_t *vha = qpair->vha; 2355 2356 if (qla2x00_reset_active(vha)) 2357 return NULL; 2358 2359 return __qla2x00_alloc_iocbs(qpair, sp); 2360 } 2361 2362 void * 2363 qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp) 2364 { 2365 return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp); 2366 } 2367 2368 static void 2369 qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio) 2370 { 2371 struct srb_iocb *lio = &sp->u.iocb_cmd; 2372 2373 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2374 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI); 2375 if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) { 2376 logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI); 2377 if (sp->vha->flags.nvme_first_burst) 2378 logio->io_parameter[0] = 2379 cpu_to_le32(NVME_PRLI_SP_FIRST_BURST); 2380 if (sp->vha->flags.nvme2_enabled) { 2381 /* Set service parameter BIT_8 for SLER support */ 2382 logio->io_parameter[0] |= 2383 cpu_to_le32(NVME_PRLI_SP_SLER); 2384 /* Set service parameter BIT_9 for PI control support */ 2385 logio->io_parameter[0] |= 2386 cpu_to_le32(NVME_PRLI_SP_PI_CTRL); 2387 } 2388 } 2389 2390 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2391 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 2392 logio->port_id[1] =
sp->fcport->d_id.b.area; 2393 logio->port_id[2] = sp->fcport->d_id.b.domain; 2394 logio->vp_index = sp->vha->vp_idx; 2395 } 2396 2397 static void 2398 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio) 2399 { 2400 struct srb_iocb *lio = &sp->u.iocb_cmd; 2401 2402 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2403 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); 2404 2405 if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) { 2406 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI); 2407 } else { 2408 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); 2409 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI) 2410 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI); 2411 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI) 2412 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI); 2413 } 2414 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2415 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 2416 logio->port_id[1] = sp->fcport->d_id.b.area; 2417 logio->port_id[2] = sp->fcport->d_id.b.domain; 2418 logio->vp_index = sp->vha->vp_idx; 2419 } 2420 2421 static void 2422 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx) 2423 { 2424 struct qla_hw_data *ha = sp->vha->hw; 2425 struct srb_iocb *lio = &sp->u.iocb_cmd; 2426 uint16_t opts; 2427 2428 mbx->entry_type = MBX_IOCB_TYPE; 2429 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); 2430 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT); 2431 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0; 2432 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0; 2433 if (HAS_EXTENDED_IDS(ha)) { 2434 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id); 2435 mbx->mb10 = cpu_to_le16(opts); 2436 } else { 2437 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts); 2438 } 2439 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); 2440 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | 2441 sp->fcport->d_id.b.al_pa); 2442 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); 2443 } 2444 2445 static void 2446 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio) 2447 { 2448 u16 control_flags = LCF_COMMAND_LOGO; 2449 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2450 2451 if (sp->fcport->explicit_logout) { 2452 control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT; 2453 } else { 2454 control_flags |= LCF_IMPL_LOGO; 2455 2456 if (!sp->fcport->keep_nport_handle) 2457 control_flags |= LCF_FREE_NPORT; 2458 } 2459 2460 logio->control_flags = cpu_to_le16(control_flags); 2461 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2462 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 2463 logio->port_id[1] = sp->fcport->d_id.b.area; 2464 logio->port_id[2] = sp->fcport->d_id.b.domain; 2465 logio->vp_index = sp->vha->vp_idx; 2466 } 2467 2468 static void 2469 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx) 2470 { 2471 struct qla_hw_data *ha = sp->vha->hw; 2472 2473 mbx->entry_type = MBX_IOCB_TYPE; 2474 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); 2475 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT); 2476 mbx->mb1 = HAS_EXTENDED_IDS(ha) ? 2477 cpu_to_le16(sp->fcport->loop_id) : 2478 cpu_to_le16(sp->fcport->loop_id << 8); 2479 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); 2480 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | 2481 sp->fcport->d_id.b.al_pa); 2482 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); 2483 /* Implicit: mbx->mbx10 = 0. 
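The packet was zeroed when it was allocated, so mb10 keeps its default value of 0.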
*/ 2484 } 2485 2486 static void 2487 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio) 2488 { 2489 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2490 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC); 2491 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2492 logio->vp_index = sp->vha->vp_idx; 2493 } 2494 2495 static void 2496 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx) 2497 { 2498 struct qla_hw_data *ha = sp->vha->hw; 2499 2500 mbx->entry_type = MBX_IOCB_TYPE; 2501 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); 2502 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE); 2503 if (HAS_EXTENDED_IDS(ha)) { 2504 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id); 2505 mbx->mb10 = cpu_to_le16(BIT_0); 2506 } else { 2507 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0); 2508 } 2509 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma)); 2510 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma)); 2511 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma))); 2512 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma))); 2513 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); 2514 } 2515 2516 static void 2517 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) 2518 { 2519 uint32_t flags; 2520 uint64_t lun; 2521 struct fc_port *fcport = sp->fcport; 2522 scsi_qla_host_t *vha = fcport->vha; 2523 struct qla_hw_data *ha = vha->hw; 2524 struct srb_iocb *iocb = &sp->u.iocb_cmd; 2525 struct req_que *req = vha->req; 2526 2527 flags = iocb->u.tmf.flags; 2528 lun = iocb->u.tmf.lun; 2529 2530 tsk->entry_type = TSK_MGMT_IOCB_TYPE; 2531 tsk->entry_count = 1; 2532 tsk->handle = make_handle(req->id, tsk->handle); 2533 tsk->nport_handle = cpu_to_le16(fcport->loop_id); 2534 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 2535 tsk->control_flags = cpu_to_le32(flags); 2536 tsk->port_id[0] = fcport->d_id.b.al_pa; 2537 tsk->port_id[1] = fcport->d_id.b.area; 2538 tsk->port_id[2] = fcport->d_id.b.domain; 2539 tsk->vp_index = fcport->vha->vp_idx; 2540 2541 if (flags == TCF_LUN_RESET) { 2542 int_to_scsilun(lun, &tsk->lun); 2543 host_to_fcp_swap((uint8_t *)&tsk->lun, 2544 sizeof(tsk->lun)); 2545 } 2546 } 2547 2548 void qla2x00_init_timer(srb_t *sp, unsigned long tmo) 2549 { 2550 timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0); 2551 sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ; 2552 sp->free = qla2x00_sp_free; 2553 if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD) 2554 init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp); 2555 sp->start_timer = 1; 2556 } 2557 2558 static void qla2x00_els_dcmd_sp_free(srb_t *sp) 2559 { 2560 struct srb_iocb *elsio = &sp->u.iocb_cmd; 2561 2562 kfree(sp->fcport); 2563 2564 if (elsio->u.els_logo.els_logo_pyld) 2565 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE, 2566 elsio->u.els_logo.els_logo_pyld, 2567 elsio->u.els_logo.els_logo_pyld_dma); 2568 2569 del_timer(&elsio->timer); 2570 qla2x00_rel_sp(sp); 2571 } 2572 2573 static void 2574 qla2x00_els_dcmd_iocb_timeout(void *data) 2575 { 2576 srb_t *sp = data; 2577 fc_port_t *fcport = sp->fcport; 2578 struct scsi_qla_host *vha = sp->vha; 2579 struct srb_iocb *lio = &sp->u.iocb_cmd; 2580 unsigned long flags = 0; 2581 int res, h; 2582 2583 ql_dbg(ql_dbg_io, vha, 0x3069, 2584 "%s Timeout, hdl=%x, portid=%02x%02x%02x\n", 2585 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, 2586 fcport->d_id.b.al_pa); 2587 2588 /* Abort the exchange */ 2589 res = qla24xx_async_abort_cmd(sp, false); 2590 if (res) { 2591 ql_dbg(ql_dbg_io, vha, 0x3070, 2592 "mbx abort_command failed.\n"); 2593 
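/* The abort could not be issued, so the firmware will not return the exchange; drop the handle from the outstanding command array here and wake up the waiter. */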
spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); 2594 for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { 2595 if (sp->qpair->req->outstanding_cmds[h] == sp) { 2596 sp->qpair->req->outstanding_cmds[h] = NULL; 2597 break; 2598 } 2599 } 2600 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); 2601 complete(&lio->u.els_logo.comp); 2602 } else { 2603 ql_dbg(ql_dbg_io, vha, 0x3071, 2604 "mbx abort_command success.\n"); 2605 } 2606 } 2607 2608 static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res) 2609 { 2610 fc_port_t *fcport = sp->fcport; 2611 struct srb_iocb *lio = &sp->u.iocb_cmd; 2612 struct scsi_qla_host *vha = sp->vha; 2613 2614 ql_dbg(ql_dbg_io, vha, 0x3072, 2615 "%s hdl=%x, portid=%02x%02x%02x done\n", 2616 sp->name, sp->handle, fcport->d_id.b.domain, 2617 fcport->d_id.b.area, fcport->d_id.b.al_pa); 2618 2619 complete(&lio->u.els_logo.comp); 2620 } 2621 2622 int 2623 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, 2624 port_id_t remote_did) 2625 { 2626 srb_t *sp; 2627 fc_port_t *fcport = NULL; 2628 struct srb_iocb *elsio = NULL; 2629 struct qla_hw_data *ha = vha->hw; 2630 struct els_logo_payload logo_pyld; 2631 int rval = QLA_SUCCESS; 2632 2633 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 2634 if (!fcport) { 2635 ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n"); 2636 return -ENOMEM; 2637 } 2638 2639 /* Alloc SRB structure */ 2640 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 2641 if (!sp) { 2642 kfree(fcport); 2643 ql_log(ql_log_info, vha, 0x70e6, 2644 "SRB allocation failed\n"); 2645 return -ENOMEM; 2646 } 2647 2648 elsio = &sp->u.iocb_cmd; 2649 fcport->loop_id = 0xFFFF; 2650 fcport->d_id.b.domain = remote_did.b.domain; 2651 fcport->d_id.b.area = remote_did.b.area; 2652 fcport->d_id.b.al_pa = remote_did.b.al_pa; 2653 2654 ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n", 2655 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); 2656 2657 sp->type = SRB_ELS_DCMD; 2658 sp->name = "ELS_DCMD"; 2659 sp->fcport = fcport; 2660 elsio->timeout = qla2x00_els_dcmd_iocb_timeout; 2661 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT); 2662 init_completion(&sp->u.iocb_cmd.u.els_logo.comp); 2663 sp->done = qla2x00_els_dcmd_sp_done; 2664 sp->free = qla2x00_els_dcmd_sp_free; 2665 2666 elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev, 2667 DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma, 2668 GFP_KERNEL); 2669 2670 if (!elsio->u.els_logo.els_logo_pyld) { 2671 sp->free(sp); 2672 return QLA_FUNCTION_FAILED; 2673 } 2674 2675 memset(&logo_pyld, 0, sizeof(struct els_logo_payload)); 2676 2677 elsio->u.els_logo.els_cmd = els_opcode; 2678 logo_pyld.opcode = els_opcode; 2679 logo_pyld.s_id[0] = vha->d_id.b.al_pa; 2680 logo_pyld.s_id[1] = vha->d_id.b.area; 2681 logo_pyld.s_id[2] = vha->d_id.b.domain; 2682 host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t)); 2683 memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE); 2684 2685 memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld, 2686 sizeof(struct els_logo_payload)); 2687 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3075, "LOGO buffer:"); 2688 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a, 2689 elsio->u.els_logo.els_logo_pyld, 2690 sizeof(*elsio->u.els_logo.els_logo_pyld)); 2691 2692 rval = qla2x00_start_sp(sp); 2693 if (rval != QLA_SUCCESS) { 2694 sp->free(sp); 2695 return QLA_FUNCTION_FAILED; 2696 } 2697 2698 ql_dbg(ql_dbg_io, vha, 0x3074, 2699 "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n", 2700 sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain, 2701 
fcport->d_id.b.area, fcport->d_id.b.al_pa); 2702 2703 wait_for_completion(&elsio->u.els_logo.comp); 2704 2705 sp->free(sp); 2706 return rval; 2707 } 2708 2709 static void 2710 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) 2711 { 2712 scsi_qla_host_t *vha = sp->vha; 2713 struct srb_iocb *elsio = &sp->u.iocb_cmd; 2714 2715 els_iocb->entry_type = ELS_IOCB_TYPE; 2716 els_iocb->entry_count = 1; 2717 els_iocb->sys_define = 0; 2718 els_iocb->entry_status = 0; 2719 els_iocb->handle = sp->handle; 2720 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2721 els_iocb->tx_dsd_count = cpu_to_le16(1); 2722 els_iocb->vp_index = vha->vp_idx; 2723 els_iocb->sof_type = EST_SOFI3; 2724 els_iocb->rx_dsd_count = 0; 2725 els_iocb->opcode = elsio->u.els_logo.els_cmd; 2726 2727 els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa; 2728 els_iocb->d_id[1] = sp->fcport->d_id.b.area; 2729 els_iocb->d_id[2] = sp->fcport->d_id.b.domain; 2730 /* For SID the byte order is different than DID */ 2731 els_iocb->s_id[1] = vha->d_id.b.al_pa; 2732 els_iocb->s_id[2] = vha->d_id.b.area; 2733 els_iocb->s_id[0] = vha->d_id.b.domain; 2734 2735 if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) { 2736 els_iocb->control_flags = 0; 2737 els_iocb->tx_byte_count = els_iocb->tx_len = 2738 cpu_to_le32(sizeof(struct els_plogi_payload)); 2739 put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma, 2740 &els_iocb->tx_address); 2741 els_iocb->rx_dsd_count = cpu_to_le16(1); 2742 els_iocb->rx_byte_count = els_iocb->rx_len = 2743 cpu_to_le32(sizeof(struct els_plogi_payload)); 2744 put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma, 2745 &els_iocb->rx_address); 2746 2747 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073, 2748 "PLOGI ELS IOCB:\n"); 2749 ql_dump_buffer(ql_log_info, vha, 0x0109, 2750 (uint8_t *)els_iocb, 2751 sizeof(*els_iocb)); 2752 } else { 2753 els_iocb->control_flags = cpu_to_le16(1 << 13); 2754 els_iocb->tx_byte_count = 2755 cpu_to_le32(sizeof(struct els_logo_payload)); 2756 put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma, 2757 &els_iocb->tx_address); 2758 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload)); 2759 2760 els_iocb->rx_byte_count = 0; 2761 els_iocb->rx_address = 0; 2762 els_iocb->rx_len = 0; 2763 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076, 2764 "LOGO ELS IOCB:"); 2765 ql_dump_buffer(ql_log_info, vha, 0x010b, 2766 els_iocb, 2767 sizeof(*els_iocb)); 2768 } 2769 2770 sp->vha->qla_stats.control_requests++; 2771 } 2772 2773 static void 2774 qla2x00_els_dcmd2_iocb_timeout(void *data) 2775 { 2776 srb_t *sp = data; 2777 fc_port_t *fcport = sp->fcport; 2778 struct scsi_qla_host *vha = sp->vha; 2779 unsigned long flags = 0; 2780 int res, h; 2781 2782 ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069, 2783 "%s hdl=%x ELS Timeout, %8phC portid=%06x\n", 2784 sp->name, sp->handle, fcport->port_name, fcport->d_id.b24); 2785 2786 /* Abort the exchange */ 2787 res = qla24xx_async_abort_cmd(sp, false); 2788 ql_dbg(ql_dbg_io, vha, 0x3070, 2789 "mbx abort_command %s\n", 2790 (res == QLA_SUCCESS) ? 
"successful" : "failed"); 2791 if (res) { 2792 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); 2793 for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { 2794 if (sp->qpair->req->outstanding_cmds[h] == sp) { 2795 sp->qpair->req->outstanding_cmds[h] = NULL; 2796 break; 2797 } 2798 } 2799 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); 2800 sp->done(sp, QLA_FUNCTION_TIMEOUT); 2801 } 2802 } 2803 2804 void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi) 2805 { 2806 if (els_plogi->els_plogi_pyld) 2807 dma_free_coherent(&vha->hw->pdev->dev, 2808 els_plogi->tx_size, 2809 els_plogi->els_plogi_pyld, 2810 els_plogi->els_plogi_pyld_dma); 2811 2812 if (els_plogi->els_resp_pyld) 2813 dma_free_coherent(&vha->hw->pdev->dev, 2814 els_plogi->rx_size, 2815 els_plogi->els_resp_pyld, 2816 els_plogi->els_resp_pyld_dma); 2817 } 2818 2819 static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) 2820 { 2821 fc_port_t *fcport = sp->fcport; 2822 struct srb_iocb *lio = &sp->u.iocb_cmd; 2823 struct scsi_qla_host *vha = sp->vha; 2824 struct event_arg ea; 2825 struct qla_work_evt *e; 2826 struct fc_port *conflict_fcport; 2827 port_id_t cid; /* conflict Nport id */ 2828 const __le32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status; 2829 u16 lid; 2830 2831 ql_dbg(ql_dbg_disc, vha, 0x3072, 2832 "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n", 2833 sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name); 2834 2835 fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE); 2836 del_timer(&sp->u.iocb_cmd.timer); 2837 2838 if (sp->flags & SRB_WAKEUP_ON_COMP) 2839 complete(&lio->u.els_plogi.comp); 2840 else { 2841 switch (le32_to_cpu(fw_status[0])) { 2842 case CS_DATA_UNDERRUN: 2843 case CS_COMPLETE: 2844 memset(&ea, 0, sizeof(ea)); 2845 ea.fcport = fcport; 2846 ea.rc = res; 2847 qla_handle_els_plogi_done(vha, &ea); 2848 break; 2849 2850 case CS_IOCB_ERROR: 2851 switch (le32_to_cpu(fw_status[1])) { 2852 case LSC_SCODE_PORTID_USED: 2853 lid = le32_to_cpu(fw_status[2]) & 0xffff; 2854 qlt_find_sess_invalidate_other(vha, 2855 wwn_to_u64(fcport->port_name), 2856 fcport->d_id, lid, &conflict_fcport); 2857 if (conflict_fcport) { 2858 /* 2859 * Another fcport shares the same 2860 * loop_id & nport id; conflict 2861 * fcport needs to finish cleanup 2862 * before this fcport can proceed 2863 * to login. 
2864 */ 2865 conflict_fcport->conflict = fcport; 2866 fcport->login_pause = 1; 2867 ql_dbg(ql_dbg_disc, vha, 0x20ed, 2868 "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n", 2869 __func__, __LINE__, 2870 fcport->port_name, 2871 fcport->d_id.b24, lid); 2872 } else { 2873 ql_dbg(ql_dbg_disc, vha, 0x20ed, 2874 "%s %d %8phC pid %06x inuse with lid %#x sched del\n", 2875 __func__, __LINE__, 2876 fcport->port_name, 2877 fcport->d_id.b24, lid); 2878 qla2x00_clear_loop_id(fcport); 2879 set_bit(lid, vha->hw->loop_id_map); 2880 fcport->loop_id = lid; 2881 fcport->keep_nport_handle = 0; 2882 qlt_schedule_sess_for_deletion(fcport); 2883 } 2884 break; 2885 2886 case LSC_SCODE_NPORT_USED: 2887 cid.b.domain = (le32_to_cpu(fw_status[2]) >> 16) 2888 & 0xff; 2889 cid.b.area = (le32_to_cpu(fw_status[2]) >> 8) 2890 & 0xff; 2891 cid.b.al_pa = le32_to_cpu(fw_status[2]) & 0xff; 2892 cid.b.rsvd_1 = 0; 2893 2894 ql_dbg(ql_dbg_disc, vha, 0x20ec, 2895 "%s %d %8phC lid %#x in use with pid %06x post gnl\n", 2896 __func__, __LINE__, fcport->port_name, 2897 fcport->loop_id, cid.b24); 2898 set_bit(fcport->loop_id, 2899 vha->hw->loop_id_map); 2900 fcport->loop_id = FC_NO_LOOP_ID; 2901 qla24xx_post_gnl_work(vha, fcport); 2902 break; 2903 2904 case LSC_SCODE_NOXCB: 2905 vha->hw->exch_starvation++; 2906 if (vha->hw->exch_starvation > 5) { 2907 ql_log(ql_log_warn, vha, 0xd046, 2908 "Exchange starvation. Resetting RISC\n"); 2909 vha->hw->exch_starvation = 0; 2910 set_bit(ISP_ABORT_NEEDED, 2911 &vha->dpc_flags); 2912 qla2xxx_wake_dpc(vha); 2913 } 2914 fallthrough; 2915 default: 2916 ql_dbg(ql_dbg_disc, vha, 0x20eb, 2917 "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n", 2918 __func__, sp->fcport->port_name, 2919 fw_status[0], fw_status[1], fw_status[2]); 2920 2921 fcport->flags &= ~FCF_ASYNC_SENT; 2922 qla2x00_set_fcport_disc_state(fcport, 2923 DSC_LOGIN_FAILED); 2924 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 2925 break; 2926 } 2927 break; 2928 2929 default: 2930 ql_dbg(ql_dbg_disc, vha, 0x20eb, 2931 "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n", 2932 __func__, sp->fcport->port_name, 2933 fw_status[0], fw_status[1], fw_status[2]); 2934 2935 sp->fcport->flags &= ~FCF_ASYNC_SENT; 2936 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_FAILED); 2937 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 2938 break; 2939 } 2940 2941 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP); 2942 if (!e) { 2943 struct srb_iocb *elsio = &sp->u.iocb_cmd; 2944 2945 qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi); 2946 sp->free(sp); 2947 return; 2948 } 2949 e->u.iosb.sp = sp; 2950 qla2x00_post_work(vha, e); 2951 } 2952 } 2953 2954 int 2955 qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, 2956 fc_port_t *fcport, bool wait) 2957 { 2958 srb_t *sp; 2959 struct srb_iocb *elsio = NULL; 2960 struct qla_hw_data *ha = vha->hw; 2961 int rval = QLA_SUCCESS; 2962 void *ptr, *resp_ptr; 2963 2964 /* Alloc SRB structure */ 2965 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 2966 if (!sp) { 2967 ql_log(ql_log_info, vha, 0x70e6, 2968 "SRB allocation failed\n"); 2969 fcport->flags &= ~FCF_ASYNC_ACTIVE; 2970 return -ENOMEM; 2971 } 2972 2973 fcport->flags |= FCF_ASYNC_SENT; 2974 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND); 2975 elsio = &sp->u.iocb_cmd; 2976 ql_dbg(ql_dbg_io, vha, 0x3073, 2977 "Enter: PLOGI portid=%06x\n", fcport->d_id.b24); 2978 2979 sp->type = SRB_ELS_DCMD; 2980 sp->name = "ELS_DCMD"; 2981 sp->fcport = fcport; 2982 2983 elsio->timeout = qla2x00_els_dcmd2_iocb_timeout; 2984 if (wait) 2985 sp->flags = SRB_WAKEUP_ON_COMP; 2986 2987 
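/* Arm the SRB timer with a small cushion (+2s) beyond the standard ELS_DCMD_TIMEOUT. */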
qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2); 2988 2989 sp->done = qla2x00_els_dcmd2_sp_done; 2990 elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE; 2991 2992 ptr = elsio->u.els_plogi.els_plogi_pyld = 2993 dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.tx_size, 2994 &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL); 2995 2996 if (!elsio->u.els_plogi.els_plogi_pyld) { 2997 rval = QLA_FUNCTION_FAILED; 2998 goto out; 2999 } 3000 3001 resp_ptr = elsio->u.els_plogi.els_resp_pyld = 3002 dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.rx_size, 3003 &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL); 3004 3005 if (!elsio->u.els_plogi.els_resp_pyld) { 3006 rval = QLA_FUNCTION_FAILED; 3007 goto out; 3008 } 3009 3010 ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr); 3011 3012 memset(ptr, 0, sizeof(struct els_plogi_payload)); 3013 memset(resp_ptr, 0, sizeof(struct els_plogi_payload)); 3014 memcpy(elsio->u.els_plogi.els_plogi_pyld->data, 3015 &ha->plogi_els_payld.fl_csp, LOGIN_TEMPLATE_SIZE); 3016 3017 elsio->u.els_plogi.els_cmd = els_opcode; 3018 elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode; 3019 3020 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n"); 3021 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109, 3022 (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 3023 sizeof(*elsio->u.els_plogi.els_plogi_pyld)); 3024 3025 init_completion(&elsio->u.els_plogi.comp); 3026 rval = qla2x00_start_sp(sp); 3027 if (rval != QLA_SUCCESS) { 3028 rval = QLA_FUNCTION_FAILED; 3029 } else { 3030 ql_dbg(ql_dbg_disc, vha, 0x3074, 3031 "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n", 3032 sp->name, sp->handle, fcport->loop_id, 3033 fcport->d_id.b24, vha->d_id.b24); 3034 } 3035 3036 if (wait) { 3037 wait_for_completion(&elsio->u.els_plogi.comp); 3038 3039 if (elsio->u.els_plogi.comp_status != CS_COMPLETE) 3040 rval = QLA_FUNCTION_FAILED; 3041 } else { 3042 goto done; 3043 } 3044 3045 out: 3046 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); 3047 qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi); 3048 sp->free(sp); 3049 done: 3050 return rval; 3051 } 3052 3053 static void 3054 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) 3055 { 3056 struct bsg_job *bsg_job = sp->u.bsg_job; 3057 struct fc_bsg_request *bsg_request = bsg_job->request; 3058 3059 els_iocb->entry_type = ELS_IOCB_TYPE; 3060 els_iocb->entry_count = 1; 3061 els_iocb->sys_define = 0; 3062 els_iocb->entry_status = 0; 3063 els_iocb->handle = sp->handle; 3064 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3065 els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt); 3066 els_iocb->vp_index = sp->vha->vp_idx; 3067 els_iocb->sof_type = EST_SOFI3; 3068 els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt); 3069 3070 els_iocb->opcode = 3071 sp->type == SRB_ELS_CMD_RPT ? 
3072 bsg_request->rqst_data.r_els.els_code : 3073 bsg_request->rqst_data.h_els.command_code; 3074 els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa; 3075 els_iocb->d_id[1] = sp->fcport->d_id.b.area; 3076 els_iocb->d_id[2] = sp->fcport->d_id.b.domain; 3077 els_iocb->control_flags = 0; 3078 els_iocb->rx_byte_count = 3079 cpu_to_le32(bsg_job->reply_payload.payload_len); 3080 els_iocb->tx_byte_count = 3081 cpu_to_le32(bsg_job->request_payload.payload_len); 3082 3083 put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list), 3084 &els_iocb->tx_address); 3085 els_iocb->tx_len = cpu_to_le32(sg_dma_len 3086 (bsg_job->request_payload.sg_list)); 3087 3088 put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list), 3089 &els_iocb->rx_address); 3090 els_iocb->rx_len = cpu_to_le32(sg_dma_len 3091 (bsg_job->reply_payload.sg_list)); 3092 3093 sp->vha->qla_stats.control_requests++; 3094 } 3095 3096 static void 3097 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb) 3098 { 3099 uint16_t avail_dsds; 3100 struct dsd64 *cur_dsd; 3101 struct scatterlist *sg; 3102 int index; 3103 uint16_t tot_dsds; 3104 scsi_qla_host_t *vha = sp->vha; 3105 struct qla_hw_data *ha = vha->hw; 3106 struct bsg_job *bsg_job = sp->u.bsg_job; 3107 int entry_count = 1; 3108 3109 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t)); 3110 ct_iocb->entry_type = CT_IOCB_TYPE; 3111 ct_iocb->entry_status = 0; 3112 ct_iocb->handle1 = sp->handle; 3113 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id); 3114 ct_iocb->status = cpu_to_le16(0); 3115 ct_iocb->control_flags = cpu_to_le16(0); 3116 ct_iocb->timeout = 0; 3117 ct_iocb->cmd_dsd_count = 3118 cpu_to_le16(bsg_job->request_payload.sg_cnt); 3119 ct_iocb->total_dsd_count = 3120 cpu_to_le16(bsg_job->request_payload.sg_cnt + 1); 3121 ct_iocb->req_bytecount = 3122 cpu_to_le32(bsg_job->request_payload.payload_len); 3123 ct_iocb->rsp_bytecount = 3124 cpu_to_le32(bsg_job->reply_payload.payload_len); 3125 3126 put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list), 3127 &ct_iocb->req_dsd.address); 3128 ct_iocb->req_dsd.length = ct_iocb->req_bytecount; 3129 3130 put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list), 3131 &ct_iocb->rsp_dsd.address); 3132 ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount; 3133 3134 avail_dsds = 1; 3135 cur_dsd = &ct_iocb->rsp_dsd; 3136 index = 0; 3137 tot_dsds = bsg_job->reply_payload.sg_cnt; 3138 3139 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) { 3140 cont_a64_entry_t *cont_pkt; 3141 3142 /* Allocate additional continuation packets? */ 3143 if (avail_dsds == 0) { 3144 /* 3145 * Five DSDs are available in the Cont. 3146 * Type 1 IOCB. 
3147 */ 3148 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, 3149 vha->hw->req_q_map[0]); 3150 cur_dsd = cont_pkt->dsd; 3151 avail_dsds = 5; 3152 entry_count++; 3153 } 3154 3155 append_dsd64(&cur_dsd, sg); 3156 avail_dsds--; 3157 } 3158 ct_iocb->entry_count = entry_count; 3159 3160 sp->vha->qla_stats.control_requests++; 3161 } 3162 3163 static void 3164 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) 3165 { 3166 uint16_t avail_dsds; 3167 struct dsd64 *cur_dsd; 3168 struct scatterlist *sg; 3169 int index; 3170 uint16_t cmd_dsds, rsp_dsds; 3171 scsi_qla_host_t *vha = sp->vha; 3172 struct qla_hw_data *ha = vha->hw; 3173 struct bsg_job *bsg_job = sp->u.bsg_job; 3174 int entry_count = 1; 3175 cont_a64_entry_t *cont_pkt = NULL; 3176 3177 ct_iocb->entry_type = CT_IOCB_TYPE; 3178 ct_iocb->entry_status = 0; 3179 ct_iocb->sys_define = 0; 3180 ct_iocb->handle = sp->handle; 3181 3182 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3183 ct_iocb->vp_index = sp->vha->vp_idx; 3184 ct_iocb->comp_status = cpu_to_le16(0); 3185 3186 cmd_dsds = bsg_job->request_payload.sg_cnt; 3187 rsp_dsds = bsg_job->reply_payload.sg_cnt; 3188 3189 ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds); 3190 ct_iocb->timeout = 0; 3191 ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds); 3192 ct_iocb->cmd_byte_count = 3193 cpu_to_le32(bsg_job->request_payload.payload_len); 3194 3195 avail_dsds = 2; 3196 cur_dsd = ct_iocb->dsd; 3197 index = 0; 3198 3199 for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) { 3200 /* Allocate additional continuation packets? */ 3201 if (avail_dsds == 0) { 3202 /* 3203 * Five DSDs are available in the Cont. 3204 * Type 1 IOCB. 3205 */ 3206 cont_pkt = qla2x00_prep_cont_type1_iocb( 3207 vha, ha->req_q_map[0]); 3208 cur_dsd = cont_pkt->dsd; 3209 avail_dsds = 5; 3210 entry_count++; 3211 } 3212 3213 append_dsd64(&cur_dsd, sg); 3214 avail_dsds--; 3215 } 3216 3217 index = 0; 3218 3219 for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) { 3220 /* Allocate additional continuation packets? */ 3221 if (avail_dsds == 0) { 3222 /* 3223 * Five DSDs are available in the Cont. 3224 * Type 1 IOCB. 3225 */ 3226 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, 3227 ha->req_q_map[0]); 3228 cur_dsd = cont_pkt->dsd; 3229 avail_dsds = 5; 3230 entry_count++; 3231 } 3232 3233 append_dsd64(&cur_dsd, sg); 3234 avail_dsds--; 3235 } 3236 ct_iocb->entry_count = entry_count; 3237 } 3238 3239 /* 3240 * qla82xx_start_scsi() - Send a SCSI command to the ISP 3241 * @sp: command to send to the ISP 3242 * 3243 * Returns non-zero if a failure occurred, else zero. 3244 */ 3245 int 3246 qla82xx_start_scsi(srb_t *sp) 3247 { 3248 int nseg; 3249 unsigned long flags; 3250 struct scsi_cmnd *cmd; 3251 uint32_t *clr_ptr; 3252 uint32_t handle; 3253 uint16_t cnt; 3254 uint16_t req_cnt; 3255 uint16_t tot_dsds; 3256 struct device_reg_82xx __iomem *reg; 3257 uint32_t dbval; 3258 __be32 *fcp_dl; 3259 uint8_t additional_cdb_len; 3260 struct ct6_dsd *ctx; 3261 struct scsi_qla_host *vha = sp->vha; 3262 struct qla_hw_data *ha = vha->hw; 3263 struct req_que *req = NULL; 3264 struct rsp_que *rsp = NULL; 3265 3266 /* Setup device pointers. 
*/ 3267 reg = &ha->iobase->isp82; 3268 cmd = GET_CMD_SP(sp); 3269 req = vha->req; 3270 rsp = ha->rsp_q_map[0]; 3271 3272 /* So we know we haven't pci_map'ed anything yet */ 3273 tot_dsds = 0; 3274 3275 dbval = 0x04 | (ha->portnum << 5); 3276 3277 /* Send marker if required */ 3278 if (vha->marker_needed != 0) { 3279 if (qla2x00_marker(vha, ha->base_qpair, 3280 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) { 3281 ql_log(ql_log_warn, vha, 0x300c, 3282 "qla2x00_marker failed for cmd=%p.\n", cmd); 3283 return QLA_FUNCTION_FAILED; 3284 } 3285 vha->marker_needed = 0; 3286 } 3287 3288 /* Acquire ring specific lock */ 3289 spin_lock_irqsave(&ha->hardware_lock, flags); 3290 3291 handle = qla2xxx_get_next_handle(req); 3292 if (handle == 0) 3293 goto queuing_error; 3294 3295 /* Map the sg table so we have an accurate count of sg entries needed */ 3296 if (scsi_sg_count(cmd)) { 3297 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), 3298 scsi_sg_count(cmd), cmd->sc_data_direction); 3299 if (unlikely(!nseg)) 3300 goto queuing_error; 3301 } else 3302 nseg = 0; 3303 3304 tot_dsds = nseg; 3305 3306 if (tot_dsds > ql2xshiftctondsd) { 3307 struct cmd_type_6 *cmd_pkt; 3308 uint16_t more_dsd_lists = 0; 3309 struct dsd_dma *dsd_ptr; 3310 uint16_t i; 3311 3312 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds); 3313 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) { 3314 ql_dbg(ql_dbg_io, vha, 0x300d, 3315 "Num of DSD list %d is more than %d for cmd=%p.\n", 3316 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN, 3317 cmd); 3318 goto queuing_error; 3319 } 3320 3321 if (more_dsd_lists <= ha->gbl_dsd_avail) 3322 goto sufficient_dsds; 3323 else 3324 more_dsd_lists -= ha->gbl_dsd_avail; 3325 3326 for (i = 0; i < more_dsd_lists; i++) { 3327 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); 3328 if (!dsd_ptr) { 3329 ql_log(ql_log_fatal, vha, 0x300e, 3330 "Failed to allocate memory for dsd_dma " 3331 "for cmd=%p.\n", cmd); 3332 goto queuing_error; 3333 } 3334 3335 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool, 3336 GFP_ATOMIC, &dsd_ptr->dsd_list_dma); 3337 if (!dsd_ptr->dsd_addr) { 3338 kfree(dsd_ptr); 3339 ql_log(ql_log_fatal, vha, 0x300f, 3340 "Failed to allocate memory for dsd_addr " 3341 "for cmd=%p.\n", cmd); 3342 goto queuing_error; 3343 } 3344 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list); 3345 ha->gbl_dsd_avail++; 3346 } 3347 3348 sufficient_dsds: 3349 req_cnt = 1; 3350 3351 if (req->cnt < (req_cnt + 2)) { 3352 cnt = (uint16_t)rd_reg_dword_relaxed( 3353 &reg->req_q_out[0]); 3354 if (req->ring_index < cnt) 3355 req->cnt = cnt - req->ring_index; 3356 else 3357 req->cnt = req->length - 3358 (req->ring_index - cnt); 3359 if (req->cnt < (req_cnt + 2)) 3360 goto queuing_error; 3361 } 3362 3363 ctx = sp->u.scmd.ct6_ctx = 3364 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); 3365 if (!ctx) { 3366 ql_log(ql_log_fatal, vha, 0x3010, 3367 "Failed to allocate ctx for cmd=%p.\n", cmd); 3368 goto queuing_error; 3369 } 3370 3371 memset(ctx, 0, sizeof(struct ct6_dsd)); 3372 ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool, 3373 GFP_ATOMIC, &ctx->fcp_cmnd_dma); 3374 if (!ctx->fcp_cmnd) { 3375 ql_log(ql_log_fatal, vha, 0x3011, 3376 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd); 3377 goto queuing_error; 3378 } 3379 3380 /* Initialize the DSD list and dma handle */ 3381 INIT_LIST_HEAD(&ctx->dsd_list); 3382 ctx->dsd_use_cnt = 0; 3383 3384 if (cmd->cmd_len > 16) { 3385 additional_cdb_len = cmd->cmd_len - 16; 3386 if ((cmd->cmd_len % 4) != 0) { 3387 /* SCSI command bigger than 16 bytes must be 3388 * multiple of 4 3389 */ 3390
ql_log(ql_log_warn, vha, 0x3012, 3391 "scsi cmd len %d not multiple of 4 " 3392 "for cmd=%p.\n", cmd->cmd_len, cmd); 3393 goto queuing_error_fcp_cmnd; 3394 } 3395 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4; 3396 } else { 3397 additional_cdb_len = 0; 3398 ctx->fcp_cmnd_len = 12 + 16 + 4; 3399 } 3400 3401 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr; 3402 cmd_pkt->handle = make_handle(req->id, handle); 3403 3404 /* Zero out remaining portion of packet. */ 3405 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ 3406 clr_ptr = (uint32_t *)cmd_pkt + 2; 3407 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 3408 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 3409 3410 /* Set NPORT-ID and LUN number*/ 3411 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3412 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 3413 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 3414 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 3415 cmd_pkt->vp_index = sp->vha->vp_idx; 3416 3417 /* Build IOCB segments */ 3418 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds)) 3419 goto queuing_error_fcp_cmnd; 3420 3421 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 3422 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 3423 3424 /* build FCP_CMND IU */ 3425 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun); 3426 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; 3427 3428 if (cmd->sc_data_direction == DMA_TO_DEVICE) 3429 ctx->fcp_cmnd->additional_cdb_len |= 1; 3430 else if (cmd->sc_data_direction == DMA_FROM_DEVICE) 3431 ctx->fcp_cmnd->additional_cdb_len |= 2; 3432 3433 /* Populate the FCP_PRIO. */ 3434 if (ha->flags.fcp_prio_enabled) 3435 ctx->fcp_cmnd->task_attribute |= 3436 sp->fcport->fcp_prio << 3; 3437 3438 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); 3439 3440 fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 + 3441 additional_cdb_len); 3442 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd)); 3443 3444 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len); 3445 put_unaligned_le64(ctx->fcp_cmnd_dma, 3446 &cmd_pkt->fcp_cmnd_dseg_address); 3447 3448 sp->flags |= SRB_FCP_CMND_DMA_VALID; 3449 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 3450 /* Set total data segment count. */ 3451 cmd_pkt->entry_count = (uint8_t)req_cnt; 3452 /* Specify response queue number where 3453 * completion should happen 3454 */ 3455 cmd_pkt->entry_status = (uint8_t) rsp->id; 3456 } else { 3457 struct cmd_type_7 *cmd_pkt; 3458 3459 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); 3460 if (req->cnt < (req_cnt + 2)) { 3461 cnt = (uint16_t)rd_reg_dword_relaxed( 3462 &reg->req_q_out[0]); 3463 if (req->ring_index < cnt) 3464 req->cnt = cnt - req->ring_index; 3465 else 3466 req->cnt = req->length - 3467 (req->ring_index - cnt); 3468 } 3469 if (req->cnt < (req_cnt + 2)) 3470 goto queuing_error; 3471 3472 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; 3473 cmd_pkt->handle = make_handle(req->id, handle); 3474 3475 /* Zero out remaining portion of packet.
*/ 3476 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/ 3477 clr_ptr = (uint32_t *)cmd_pkt + 2; 3478 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 3479 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 3480 3481 /* Set NPORT-ID and LUN number*/ 3482 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3483 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 3484 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 3485 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 3486 cmd_pkt->vp_index = sp->vha->vp_idx; 3487 3488 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 3489 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, 3490 sizeof(cmd_pkt->lun)); 3491 3492 /* Populate the FCP_PRIO. */ 3493 if (ha->flags.fcp_prio_enabled) 3494 cmd_pkt->task |= sp->fcport->fcp_prio << 3; 3495 3496 /* Load SCSI command packet. */ 3497 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); 3498 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); 3499 3500 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 3501 3502 /* Build IOCB segments */ 3503 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req); 3504 3505 /* Set total data segment count. */ 3506 cmd_pkt->entry_count = (uint8_t)req_cnt; 3507 /* Specify response queue number where 3508 * completion should happen. 3509 */ 3510 cmd_pkt->entry_status = (uint8_t) rsp->id; 3511 3512 } 3513 /* Build command packet. */ 3514 req->current_outstanding_cmd = handle; 3515 req->outstanding_cmds[handle] = sp; 3516 sp->handle = handle; 3517 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 3518 req->cnt -= req_cnt; 3519 wmb(); 3520 3521 /* Adjust ring index. */ 3522 req->ring_index++; 3523 if (req->ring_index == req->length) { 3524 req->ring_index = 0; 3525 req->ring_ptr = req->ring; 3526 } else 3527 req->ring_ptr++; 3528 3529 sp->flags |= SRB_DMA_VALID; 3530 3531 /* Set chip new ring index. */ 3532 /* write, read and verify logic */ 3533 dbval = dbval | (req->id << 8) | (req->ring_index << 16); 3534 if (ql2xdbwr) 3535 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval); 3536 else { 3537 wrt_reg_dword(ha->nxdb_wr_ptr, dbval); 3538 wmb(); 3539 while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) { 3540 wrt_reg_dword(ha->nxdb_wr_ptr, dbval); 3541 wmb(); 3542 } 3543 } 3544 3545 /* Manage unprocessed RIO/ZIO commands in response queue. 
*/ 3546 if (vha->flags.process_response_queue && 3547 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 3548 qla24xx_process_response_queue(vha, rsp); 3549 3550 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3551 return QLA_SUCCESS; 3552 3553 queuing_error_fcp_cmnd: 3554 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma); 3555 queuing_error: 3556 if (tot_dsds) 3557 scsi_dma_unmap(cmd); 3558 3559 if (sp->u.scmd.crc_ctx) { 3560 mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool); 3561 sp->u.scmd.crc_ctx = NULL; 3562 } 3563 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3564 3565 return QLA_FUNCTION_FAILED; 3566 } 3567 3568 static void 3569 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb) 3570 { 3571 struct srb_iocb *aio = &sp->u.iocb_cmd; 3572 scsi_qla_host_t *vha = sp->vha; 3573 struct req_que *req = sp->qpair->req; 3574 3575 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx)); 3576 abt_iocb->entry_type = ABORT_IOCB_TYPE; 3577 abt_iocb->entry_count = 1; 3578 abt_iocb->handle = make_handle(req->id, sp->handle); 3579 if (sp->fcport) { 3580 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3581 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; 3582 abt_iocb->port_id[1] = sp->fcport->d_id.b.area; 3583 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain; 3584 } 3585 abt_iocb->handle_to_abort = 3586 make_handle(le16_to_cpu(aio->u.abt.req_que_no), 3587 aio->u.abt.cmd_hndl); 3588 abt_iocb->vp_index = vha->vp_idx; 3589 abt_iocb->req_que_no = aio->u.abt.req_que_no; 3590 /* Send the command to the firmware */ 3591 wmb(); 3592 } 3593 3594 static void 3595 qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx) 3596 { 3597 int i, sz; 3598 3599 mbx->entry_type = MBX_IOCB_TYPE; 3600 mbx->handle = sp->handle; 3601 sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb)); 3602 3603 for (i = 0; i < sz; i++) 3604 mbx->mb[i] = sp->u.iocb_cmd.u.mbx.out_mb[i]; 3605 } 3606 3607 static void 3608 qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt) 3609 { 3610 sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt; 3611 qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg); 3612 ct_pkt->handle = sp->handle; 3613 } 3614 3615 static void qla2x00_send_notify_ack_iocb(srb_t *sp, 3616 struct nack_to_isp *nack) 3617 { 3618 struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy; 3619 3620 nack->entry_type = NOTIFY_ACK_TYPE; 3621 nack->entry_count = 1; 3622 nack->ox_id = ntfy->ox_id; 3623 3624 nack->u.isp24.handle = sp->handle; 3625 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; 3626 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { 3627 nack->u.isp24.flags = ntfy->u.isp24.flags & 3628 cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB); 3629 } 3630 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; 3631 nack->u.isp24.status = ntfy->u.isp24.status; 3632 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; 3633 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; 3634 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; 3635 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; 3636 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; 3637 nack->u.isp24.srr_flags = 0; 3638 nack->u.isp24.srr_reject_code = 0; 3639 nack->u.isp24.srr_reject_code_expl = 0; 3640 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; 3641 } 3642 3643 /* 3644 * Build NVME LS request 3645 */ 3646 static void 3647 qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt) 3648 { 3649 struct srb_iocb *nvme; 3650 3651 nvme = &sp->u.iocb_cmd; 3652 cmd_pkt->entry_type = PT_LS4_REQUEST; 
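/* A pass-through LS4 request carries a single transmit DSD for the NVMe LS command and a single receive DSD for the response; both are set up below. */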
3653 cmd_pkt->entry_count = 1; 3654 cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT); 3655 3656 cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec); 3657 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3658 cmd_pkt->vp_index = sp->fcport->vha->vp_idx; 3659 3660 cmd_pkt->tx_dseg_count = cpu_to_le16(1); 3661 cmd_pkt->tx_byte_count = cpu_to_le32(nvme->u.nvme.cmd_len); 3662 cmd_pkt->dsd[0].length = cpu_to_le32(nvme->u.nvme.cmd_len); 3663 put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address); 3664 3665 cmd_pkt->rx_dseg_count = cpu_to_le16(1); 3666 cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len); 3667 cmd_pkt->dsd[1].length = cpu_to_le32(nvme->u.nvme.rsp_len); 3668 put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address); 3669 } 3670 3671 static void 3672 qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce) 3673 { 3674 int map, pos; 3675 3676 vce->entry_type = VP_CTRL_IOCB_TYPE; 3677 vce->handle = sp->handle; 3678 vce->entry_count = 1; 3679 vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd); 3680 vce->vp_count = cpu_to_le16(1); 3681 3682 /* 3683 * index map in firmware starts with 1; decrement index 3684 * this is ok as we never use index 0 3685 */ 3686 map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8; 3687 pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7; 3688 vce->vp_idx_map[map] |= 1 << pos; 3689 } 3690 3691 static void 3692 qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio) 3693 { 3694 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 3695 logio->control_flags = 3696 cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO); 3697 3698 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3699 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 3700 logio->port_id[1] = sp->fcport->d_id.b.area; 3701 logio->port_id[2] = sp->fcport->d_id.b.domain; 3702 logio->vp_index = sp->fcport->vha->vp_idx; 3703 } 3704 3705 int 3706 qla2x00_start_sp(srb_t *sp) 3707 { 3708 int rval = QLA_SUCCESS; 3709 scsi_qla_host_t *vha = sp->vha; 3710 struct qla_hw_data *ha = vha->hw; 3711 struct qla_qpair *qp = sp->qpair; 3712 void *pkt; 3713 unsigned long flags; 3714 3715 spin_lock_irqsave(qp->qp_lock_ptr, flags); 3716 pkt = __qla2x00_alloc_iocbs(sp->qpair, sp); 3717 if (!pkt) { 3718 rval = EAGAIN; 3719 ql_log(ql_log_warn, vha, 0x700c, 3720 "qla2x00_alloc_iocbs failed.\n"); 3721 goto done; 3722 } 3723 3724 switch (sp->type) { 3725 case SRB_LOGIN_CMD: 3726 IS_FWI2_CAPABLE(ha) ? 3727 qla24xx_login_iocb(sp, pkt) : 3728 qla2x00_login_iocb(sp, pkt); 3729 break; 3730 case SRB_PRLI_CMD: 3731 qla24xx_prli_iocb(sp, pkt); 3732 break; 3733 case SRB_LOGOUT_CMD: 3734 IS_FWI2_CAPABLE(ha) ? 3735 qla24xx_logout_iocb(sp, pkt) : 3736 qla2x00_logout_iocb(sp, pkt); 3737 break; 3738 case SRB_ELS_CMD_RPT: 3739 case SRB_ELS_CMD_HST: 3740 qla24xx_els_iocb(sp, pkt); 3741 break; 3742 case SRB_CT_CMD: 3743 IS_FWI2_CAPABLE(ha) ? 3744 qla24xx_ct_iocb(sp, pkt) : 3745 qla2x00_ct_iocb(sp, pkt); 3746 break; 3747 case SRB_ADISC_CMD: 3748 IS_FWI2_CAPABLE(ha) ? 3749 qla24xx_adisc_iocb(sp, pkt) : 3750 qla2x00_adisc_iocb(sp, pkt); 3751 break; 3752 case SRB_TM_CMD: 3753 IS_QLAFX00(ha) ? 3754 qlafx00_tm_iocb(sp, pkt) : 3755 qla24xx_tm_iocb(sp, pkt); 3756 break; 3757 case SRB_FXIOCB_DCMD: 3758 case SRB_FXIOCB_BCMD: 3759 qlafx00_fxdisc_iocb(sp, pkt); 3760 break; 3761 case SRB_NVME_LS: 3762 qla_nvme_ls(sp, pkt); 3763 break; 3764 case SRB_ABT_CMD: 3765 IS_QLAFX00(ha) ? 
3766 qlafx00_abort_iocb(sp, pkt) : 3767 qla24xx_abort_iocb(sp, pkt); 3768 break; 3769 case SRB_ELS_DCMD: 3770 qla24xx_els_logo_iocb(sp, pkt); 3771 break; 3772 case SRB_CT_PTHRU_CMD: 3773 qla2x00_ctpthru_cmd_iocb(sp, pkt); 3774 break; 3775 case SRB_MB_IOCB: 3776 qla2x00_mb_iocb(sp, pkt); 3777 break; 3778 case SRB_NACK_PLOGI: 3779 case SRB_NACK_PRLI: 3780 case SRB_NACK_LOGO: 3781 qla2x00_send_notify_ack_iocb(sp, pkt); 3782 break; 3783 case SRB_CTRL_VP: 3784 qla25xx_ctrlvp_iocb(sp, pkt); 3785 break; 3786 case SRB_PRLO_CMD: 3787 qla24xx_prlo_iocb(sp, pkt); 3788 break; 3789 default: 3790 break; 3791 } 3792 3793 if (sp->start_timer) 3794 add_timer(&sp->u.iocb_cmd.timer); 3795 3796 wmb(); 3797 qla2x00_start_iocbs(vha, qp->req); 3798 done: 3799 spin_unlock_irqrestore(qp->qp_lock_ptr, flags); 3800 return rval; 3801 } 3802 3803 static void 3804 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha, 3805 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds) 3806 { 3807 uint16_t avail_dsds; 3808 struct dsd64 *cur_dsd; 3809 uint32_t req_data_len = 0; 3810 uint32_t rsp_data_len = 0; 3811 struct scatterlist *sg; 3812 int index; 3813 int entry_count = 1; 3814 struct bsg_job *bsg_job = sp->u.bsg_job; 3815 3816 /* Update entry type to indicate bidir command */ 3817 put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type); 3818 3819 /* Set the transfer direction; for this bidirectional command both the 3820 * read and write flags are set. Also set the BD_WRAP_BACK flag; the 3821 * firmware takes care of assigning DID=SID for outgoing pkts. 3822 */ 3823 cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt); 3824 cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt); 3825 cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA | 3826 BD_WRAP_BACK); 3827 3828 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len; 3829 cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len); 3830 cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len); 3831 cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2); 3832 3833 vha->bidi_stats.transfer_bytes += req_data_len; 3834 vha->bidi_stats.io_count++; 3835 3836 vha->qla_stats.output_bytes += req_data_len; 3837 vha->qla_stats.output_requests++; 3838 3839 /* Only one dsd is available for bidirectional IOCB, remaining dsds 3840 * are bundled in continuation iocb 3841 */ 3842 avail_dsds = 1; 3843 cur_dsd = &cmd_pkt->fcp_dsd; 3844 3845 index = 0; 3846 3847 for_each_sg(bsg_job->request_payload.sg_list, sg, 3848 bsg_job->request_payload.sg_cnt, index) { 3849 cont_a64_entry_t *cont_pkt; 3850 3851 /* Allocate additional continuation packets */ 3852 if (avail_dsds == 0) { 3853 /* Continuation type 1 IOCB can accommodate 3854 * 5 DSDS 3855 */ 3856 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); 3857 cur_dsd = cont_pkt->dsd; 3858 avail_dsds = 5; 3859 entry_count++; 3860 } 3861 append_dsd64(&cur_dsd, sg); 3862 avail_dsds--; 3863 } 3864 /* For a read request the DSDs always go to a continuation IOCB 3865 * and follow the write DSDs. If there is room on the current IOCB 3866 * they are added to that IOCB, else a new continuation IOCB is 3867 * allocated.
3868 */ 3869 for_each_sg(bsg_job->reply_payload.sg_list, sg, 3870 bsg_job->reply_payload.sg_cnt, index) { 3871 cont_a64_entry_t *cont_pkt; 3872 3873 /* Allocate additional continuation packets */ 3874 if (avail_dsds == 0) { 3875 /* Continuation type 1 IOCB can accommodate 3876 * 5 DSDS 3877 */ 3878 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); 3879 cur_dsd = cont_pkt->dsd; 3880 avail_dsds = 5; 3881 entry_count++; 3882 } 3883 append_dsd64(&cur_dsd, sg); 3884 avail_dsds--; 3885 } 3886 /* This value should be the same as the number of IOCBs required for this cmd */ 3887 cmd_pkt->entry_count = entry_count; 3888 } 3889 3890 int 3891 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds) 3892 { 3893 3894 struct qla_hw_data *ha = vha->hw; 3895 unsigned long flags; 3896 uint32_t handle; 3897 uint16_t req_cnt; 3898 uint16_t cnt; 3899 uint32_t *clr_ptr; 3900 struct cmd_bidir *cmd_pkt = NULL; 3901 struct rsp_que *rsp; 3902 struct req_que *req; 3903 int rval = EXT_STATUS_OK; 3904 3905 rval = QLA_SUCCESS; 3906 3907 rsp = ha->rsp_q_map[0]; 3908 req = vha->req; 3909 3910 /* Send marker if required */ 3911 if (vha->marker_needed != 0) { 3912 if (qla2x00_marker(vha, ha->base_qpair, 3913 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) 3914 return EXT_STATUS_MAILBOX; 3915 vha->marker_needed = 0; 3916 } 3917 3918 /* Acquire ring specific lock */ 3919 spin_lock_irqsave(&ha->hardware_lock, flags); 3920 3921 handle = qla2xxx_get_next_handle(req); 3922 if (handle == 0) { 3923 rval = EXT_STATUS_BUSY; 3924 goto queuing_error; 3925 } 3926 3927 /* Calculate number of IOCB required */ 3928 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); 3929 3930 /* Check for room on request queue. */ 3931 if (req->cnt < req_cnt + 2) { 3932 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : 3933 rd_reg_dword_relaxed(req->req_q_out); 3934 if (req->ring_index < cnt) 3935 req->cnt = cnt - req->ring_index; 3936 else 3937 req->cnt = req->length - 3938 (req->ring_index - cnt); 3939 } 3940 if (req->cnt < req_cnt + 2) { 3941 rval = EXT_STATUS_BUSY; 3942 goto queuing_error; 3943 } 3944 3945 cmd_pkt = (struct cmd_bidir *)req->ring_ptr; 3946 cmd_pkt->handle = make_handle(req->id, handle); 3947 3948 /* Zero out remaining portion of packet. */ 3949 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ 3950 clr_ptr = (uint32_t *)cmd_pkt + 2; 3951 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 3952 3953 /* Set NPORT-ID (of vha) */ 3954 cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id); 3955 cmd_pkt->port_id[0] = vha->d_id.b.al_pa; 3956 cmd_pkt->port_id[1] = vha->d_id.b.area; 3957 cmd_pkt->port_id[2] = vha->d_id.b.domain; 3958 3959 qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds); 3960 cmd_pkt->entry_status = (uint8_t) rsp->id; 3961 /* Build command packet. */ 3962 req->current_outstanding_cmd = handle; 3963 req->outstanding_cmds[handle] = sp; 3964 sp->handle = handle; 3965 req->cnt -= req_cnt; 3966 3967 /* Send the command to the firmware */ 3968 wmb(); 3969 qla2x00_start_iocbs(vha, req); 3970 queuing_error: 3971 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3972 return rval; 3973 } 3974