// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	put_unaligned_le32(IS_QLAFX00(vha->hw) ?
CONTINUE_A64_TYPE_FX00 : 139 CONTINUE_A64_TYPE, &cont_pkt->entry_type); 140 141 return (cont_pkt); 142 } 143 144 inline int 145 qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts) 146 { 147 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 148 uint8_t guard = scsi_host_get_guard(cmd->device->host); 149 150 /* We always use DIFF Bundling for best performance */ 151 *fw_prot_opts = 0; 152 153 /* Translate SCSI opcode to a protection opcode */ 154 switch (scsi_get_prot_op(cmd)) { 155 case SCSI_PROT_READ_STRIP: 156 *fw_prot_opts |= PO_MODE_DIF_REMOVE; 157 break; 158 case SCSI_PROT_WRITE_INSERT: 159 *fw_prot_opts |= PO_MODE_DIF_INSERT; 160 break; 161 case SCSI_PROT_READ_INSERT: 162 *fw_prot_opts |= PO_MODE_DIF_INSERT; 163 break; 164 case SCSI_PROT_WRITE_STRIP: 165 *fw_prot_opts |= PO_MODE_DIF_REMOVE; 166 break; 167 case SCSI_PROT_READ_PASS: 168 case SCSI_PROT_WRITE_PASS: 169 if (guard & SHOST_DIX_GUARD_IP) 170 *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM; 171 else 172 *fw_prot_opts |= PO_MODE_DIF_PASS; 173 break; 174 default: /* Normal Request */ 175 *fw_prot_opts |= PO_MODE_DIF_PASS; 176 break; 177 } 178 179 return scsi_prot_sg_count(cmd); 180 } 181 182 /* 183 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit 184 * capable IOCB types. 185 * 186 * @sp: SRB command to process 187 * @cmd_pkt: Command type 2 IOCB 188 * @tot_dsds: Total number of segments to transfer 189 */ 190 void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt, 191 uint16_t tot_dsds) 192 { 193 uint16_t avail_dsds; 194 struct dsd32 *cur_dsd; 195 scsi_qla_host_t *vha; 196 struct scsi_cmnd *cmd; 197 struct scatterlist *sg; 198 int i; 199 200 cmd = GET_CMD_SP(sp); 201 202 /* Update entry type to indicate Command Type 2 IOCB */ 203 put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type); 204 205 /* No data transfer */ 206 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { 207 cmd_pkt->byte_count = cpu_to_le32(0); 208 return; 209 } 210 211 vha = sp->vha; 212 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 213 214 /* Three DSDs are available in the Command Type 2 IOCB */ 215 avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32); 216 cur_dsd = cmd_pkt->dsd32; 217 218 /* Load data segments */ 219 scsi_for_each_sg(cmd, sg, tot_dsds, i) { 220 cont_entry_t *cont_pkt; 221 222 /* Allocate additional continuation packets? */ 223 if (avail_dsds == 0) { 224 /* 225 * Seven DSDs are available in the Continuation 226 * Type 0 IOCB. 227 */ 228 cont_pkt = qla2x00_prep_cont_type0_iocb(vha); 229 cur_dsd = cont_pkt->dsd; 230 avail_dsds = ARRAY_SIZE(cont_pkt->dsd); 231 } 232 233 append_dsd32(&cur_dsd, sg); 234 avail_dsds--; 235 } 236 } 237 238 /** 239 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit 240 * capable IOCB types. 
241 * 242 * @sp: SRB command to process 243 * @cmd_pkt: Command type 3 IOCB 244 * @tot_dsds: Total number of segments to transfer 245 */ 246 void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt, 247 uint16_t tot_dsds) 248 { 249 uint16_t avail_dsds; 250 struct dsd64 *cur_dsd; 251 scsi_qla_host_t *vha; 252 struct scsi_cmnd *cmd; 253 struct scatterlist *sg; 254 int i; 255 256 cmd = GET_CMD_SP(sp); 257 258 /* Update entry type to indicate Command Type 3 IOCB */ 259 put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type); 260 261 /* No data transfer */ 262 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { 263 cmd_pkt->byte_count = cpu_to_le32(0); 264 return; 265 } 266 267 vha = sp->vha; 268 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 269 270 /* Two DSDs are available in the Command Type 3 IOCB */ 271 avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64); 272 cur_dsd = cmd_pkt->dsd64; 273 274 /* Load data segments */ 275 scsi_for_each_sg(cmd, sg, tot_dsds, i) { 276 cont_a64_entry_t *cont_pkt; 277 278 /* Allocate additional continuation packets? */ 279 if (avail_dsds == 0) { 280 /* 281 * Five DSDs are available in the Continuation 282 * Type 1 IOCB. 283 */ 284 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); 285 cur_dsd = cont_pkt->dsd; 286 avail_dsds = ARRAY_SIZE(cont_pkt->dsd); 287 } 288 289 append_dsd64(&cur_dsd, sg); 290 avail_dsds--; 291 } 292 } 293 294 /* 295 * Find the first handle that is not in use, starting from 296 * req->current_outstanding_cmd + 1. The caller must hold the lock that is 297 * associated with @req. 298 */ 299 uint32_t qla2xxx_get_next_handle(struct req_que *req) 300 { 301 uint32_t index, handle = req->current_outstanding_cmd; 302 303 for (index = 1; index < req->num_outstanding_cmds; index++) { 304 handle++; 305 if (handle == req->num_outstanding_cmds) 306 handle = 1; 307 if (!req->outstanding_cmds[handle]) 308 return handle; 309 } 310 311 return 0; 312 } 313 314 /** 315 * qla2x00_start_scsi() - Send a SCSI command to the ISP 316 * @sp: command to send to the ISP 317 * 318 * Returns non-zero if a failure occurred, else zero. 319 */ 320 int 321 qla2x00_start_scsi(srb_t *sp) 322 { 323 int nseg; 324 unsigned long flags; 325 scsi_qla_host_t *vha; 326 struct scsi_cmnd *cmd; 327 uint32_t *clr_ptr; 328 uint32_t handle; 329 cmd_entry_t *cmd_pkt; 330 uint16_t cnt; 331 uint16_t req_cnt; 332 uint16_t tot_dsds; 333 struct device_reg_2xxx __iomem *reg; 334 struct qla_hw_data *ha; 335 struct req_que *req; 336 struct rsp_que *rsp; 337 338 /* Setup device pointers. 
*/ 339 vha = sp->vha; 340 ha = vha->hw; 341 reg = &ha->iobase->isp; 342 cmd = GET_CMD_SP(sp); 343 req = ha->req_q_map[0]; 344 rsp = ha->rsp_q_map[0]; 345 /* So we know we haven't pci_map'ed anything yet */ 346 tot_dsds = 0; 347 348 /* Send marker if required */ 349 if (vha->marker_needed != 0) { 350 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) != 351 QLA_SUCCESS) { 352 return (QLA_FUNCTION_FAILED); 353 } 354 vha->marker_needed = 0; 355 } 356 357 /* Acquire ring specific lock */ 358 spin_lock_irqsave(&ha->hardware_lock, flags); 359 360 handle = qla2xxx_get_next_handle(req); 361 if (handle == 0) 362 goto queuing_error; 363 364 /* Map the sg table so we have an accurate count of sg entries needed */ 365 if (scsi_sg_count(cmd)) { 366 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), 367 scsi_sg_count(cmd), cmd->sc_data_direction); 368 if (unlikely(!nseg)) 369 goto queuing_error; 370 } else 371 nseg = 0; 372 373 tot_dsds = nseg; 374 375 /* Calculate the number of request entries needed. */ 376 req_cnt = ha->isp_ops->calc_req_entries(tot_dsds); 377 if (req->cnt < (req_cnt + 2)) { 378 cnt = rd_reg_word_relaxed(ISP_REQ_Q_OUT(ha, reg)); 379 if (req->ring_index < cnt) 380 req->cnt = cnt - req->ring_index; 381 else 382 req->cnt = req->length - 383 (req->ring_index - cnt); 384 /* If still no head room then bail out */ 385 if (req->cnt < (req_cnt + 2)) 386 goto queuing_error; 387 } 388 389 /* Build command packet */ 390 req->current_outstanding_cmd = handle; 391 req->outstanding_cmds[handle] = sp; 392 sp->handle = handle; 393 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 394 req->cnt -= req_cnt; 395 396 cmd_pkt = (cmd_entry_t *)req->ring_ptr; 397 cmd_pkt->handle = handle; 398 /* Zero out remaining portion of packet. */ 399 clr_ptr = (uint32_t *)cmd_pkt + 2; 400 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 401 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 402 403 /* Set target ID and LUN number*/ 404 SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id); 405 cmd_pkt->lun = cpu_to_le16(cmd->device->lun); 406 cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG); 407 408 /* Load SCSI command packet. */ 409 memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len); 410 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 411 412 /* Build IOCB segments */ 413 ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds); 414 415 /* Set total data segment count. */ 416 cmd_pkt->entry_count = (uint8_t)req_cnt; 417 wmb(); 418 419 /* Adjust ring index. */ 420 req->ring_index++; 421 if (req->ring_index == req->length) { 422 req->ring_index = 0; 423 req->ring_ptr = req->ring; 424 } else 425 req->ring_ptr++; 426 427 sp->flags |= SRB_DMA_VALID; 428 429 /* Set chip new ring index. */ 430 wrt_reg_word(ISP_REQ_Q_IN(ha, reg), req->ring_index); 431 rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */ 432 433 /* Manage unprocessed RIO/ZIO commands in response queue. 
 */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);

	if (IS_P3P_TYPE(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			wrt_reg_dword(req->req_q_in, req->ring_index);
		} else if (IS_QLA83XX(ha)) {
			wrt_reg_dword(req->req_q_in, req->ring_index);
			rd_reg_dword_relaxed(&ha->iobase->isp24.hccr);
		} else if (IS_QLAFX00(ha)) {
			wrt_reg_dword(&reg->ispfx00.req_q_in, req->ring_index);
			rd_reg_dword_relaxed(&reg->ispfx00.req_q_in);
			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
		} else if (IS_FWI2_CAPABLE(ha)) {
			wrt_reg_dword(&reg->isp24.req_q_in, req->ring_index);
			rd_reg_dword_relaxed(&reg->isp24.req_q_in);
		} else {
			wrt_reg_word(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @qpair: queue pair pointer
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24 = NULL;
	struct req_que *req = qpair->req;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = make_handle(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16((uint16_t)lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
	if (ha_locked) {
		if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	} else {
		if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	}
	vha->marker_needed = 0;

	return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
	uint16_t tot_dsds)
{
	struct dsd64 *cur_dsd = NULL, *next_dsd;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;
	struct qla_qpair *qpair = sp->qpair;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 6 IOCB */
	put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return 0;
	}

	vha = sp->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		qpair->counters.output_bytes += scsi_bufflen(cmd);
		qpair->counters.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		qpair->counters.input_bytes += scsi_bufflen(cmd);
		qpair->counters.input_requests++;
	}

	cur_seg = scsi_sglist(cmd);
	ctx = sp->u.scmd.ct6_ctx;

	while (tot_dsds) {
		avail_dsds =
(tot_dsds > QLA_DSDS_PER_IOCB) ? 628 QLA_DSDS_PER_IOCB : tot_dsds; 629 tot_dsds -= avail_dsds; 630 dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE; 631 632 dsd_ptr = list_first_entry(&ha->gbl_dsd_list, 633 struct dsd_dma, list); 634 next_dsd = dsd_ptr->dsd_addr; 635 list_del(&dsd_ptr->list); 636 ha->gbl_dsd_avail--; 637 list_add_tail(&dsd_ptr->list, &ctx->dsd_list); 638 ctx->dsd_use_cnt++; 639 ha->gbl_dsd_inuse++; 640 641 if (first_iocb) { 642 first_iocb = 0; 643 put_unaligned_le64(dsd_ptr->dsd_list_dma, 644 &cmd_pkt->fcp_dsd.address); 645 cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len); 646 } else { 647 put_unaligned_le64(dsd_ptr->dsd_list_dma, 648 &cur_dsd->address); 649 cur_dsd->length = cpu_to_le32(dsd_list_len); 650 cur_dsd++; 651 } 652 cur_dsd = next_dsd; 653 while (avail_dsds) { 654 append_dsd64(&cur_dsd, cur_seg); 655 cur_seg = sg_next(cur_seg); 656 avail_dsds--; 657 } 658 } 659 660 /* Null termination */ 661 cur_dsd->address = 0; 662 cur_dsd->length = 0; 663 cur_dsd++; 664 cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE); 665 return 0; 666 } 667 668 /* 669 * qla24xx_calc_dsd_lists() - Determine number of DSD list required 670 * for Command Type 6. 671 * 672 * @dsds: number of data segment descriptors needed 673 * 674 * Returns the number of dsd list needed to store @dsds. 675 */ 676 static inline uint16_t 677 qla24xx_calc_dsd_lists(uint16_t dsds) 678 { 679 uint16_t dsd_lists = 0; 680 681 dsd_lists = (dsds/QLA_DSDS_PER_IOCB); 682 if (dsds % QLA_DSDS_PER_IOCB) 683 dsd_lists++; 684 return dsd_lists; 685 } 686 687 688 /** 689 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7 690 * IOCB types. 691 * 692 * @sp: SRB command to process 693 * @cmd_pkt: Command type 3 IOCB 694 * @tot_dsds: Total number of segments to transfer 695 * @req: pointer to request queue 696 */ 697 inline void 698 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt, 699 uint16_t tot_dsds, struct req_que *req) 700 { 701 uint16_t avail_dsds; 702 struct dsd64 *cur_dsd; 703 scsi_qla_host_t *vha; 704 struct scsi_cmnd *cmd; 705 struct scatterlist *sg; 706 int i; 707 struct qla_qpair *qpair = sp->qpair; 708 709 cmd = GET_CMD_SP(sp); 710 711 /* Update entry type to indicate Command Type 3 IOCB */ 712 put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type); 713 714 /* No data transfer */ 715 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { 716 cmd_pkt->byte_count = cpu_to_le32(0); 717 return; 718 } 719 720 vha = sp->vha; 721 722 /* Set transfer direction */ 723 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 724 cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA); 725 qpair->counters.output_bytes += scsi_bufflen(cmd); 726 qpair->counters.output_requests++; 727 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 728 cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA); 729 qpair->counters.input_bytes += scsi_bufflen(cmd); 730 qpair->counters.input_requests++; 731 } 732 733 /* One DSD is available in the Command Type 3 IOCB */ 734 avail_dsds = 1; 735 cur_dsd = &cmd_pkt->dsd; 736 737 /* Load data segments */ 738 739 scsi_for_each_sg(cmd, sg, tot_dsds, i) { 740 cont_a64_entry_t *cont_pkt; 741 742 /* Allocate additional continuation packets? */ 743 if (avail_dsds == 0) { 744 /* 745 * Five DSDs are available in the Continuation 746 * Type 1 IOCB. 
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}
}

struct fw_dif_context {
	__le32 ref_tag;
	__le16 app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
};

/*
 * qla24xx_set_t10dif_tags - Extract Ref and App tags from SCSI command
 *
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
			0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
825 */ 826 case SCSI_PROT_DIF_TYPE1: 827 pkt->ref_tag = cpu_to_le32((uint32_t) 828 (0xffffffff & scsi_get_lba(cmd))); 829 pkt->app_tag = cpu_to_le16(0); 830 pkt->app_tag_mask[0] = 0x0; 831 pkt->app_tag_mask[1] = 0x0; 832 833 if (!qla2x00_hba_err_chk_enabled(sp)) 834 break; 835 836 /* enable ALL bytes of the ref tag */ 837 pkt->ref_tag_mask[0] = 0xff; 838 pkt->ref_tag_mask[1] = 0xff; 839 pkt->ref_tag_mask[2] = 0xff; 840 pkt->ref_tag_mask[3] = 0xff; 841 break; 842 } 843 } 844 845 int 846 qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx, 847 uint32_t *partial) 848 { 849 struct scatterlist *sg; 850 uint32_t cumulative_partial, sg_len; 851 dma_addr_t sg_dma_addr; 852 853 if (sgx->num_bytes == sgx->tot_bytes) 854 return 0; 855 856 sg = sgx->cur_sg; 857 cumulative_partial = sgx->tot_partial; 858 859 sg_dma_addr = sg_dma_address(sg); 860 sg_len = sg_dma_len(sg); 861 862 sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed; 863 864 if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) { 865 sgx->dma_len = (blk_sz - cumulative_partial); 866 sgx->tot_partial = 0; 867 sgx->num_bytes += blk_sz; 868 *partial = 0; 869 } else { 870 sgx->dma_len = sg_len - sgx->bytes_consumed; 871 sgx->tot_partial += sgx->dma_len; 872 *partial = 1; 873 } 874 875 sgx->bytes_consumed += sgx->dma_len; 876 877 if (sg_len == sgx->bytes_consumed) { 878 sg = sg_next(sg); 879 sgx->num_sg++; 880 sgx->cur_sg = sg; 881 sgx->bytes_consumed = 0; 882 } 883 884 return 1; 885 } 886 887 int 888 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, 889 struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc) 890 { 891 void *next_dsd; 892 uint8_t avail_dsds = 0; 893 uint32_t dsd_list_len; 894 struct dsd_dma *dsd_ptr; 895 struct scatterlist *sg_prot; 896 struct dsd64 *cur_dsd = dsd; 897 uint16_t used_dsds = tot_dsds; 898 uint32_t prot_int; /* protection interval */ 899 uint32_t partial; 900 struct qla2_sgx sgx; 901 dma_addr_t sle_dma; 902 uint32_t sle_dma_len, tot_prot_dma_len = 0; 903 struct scsi_cmnd *cmd; 904 905 memset(&sgx, 0, sizeof(struct qla2_sgx)); 906 if (sp) { 907 cmd = GET_CMD_SP(sp); 908 prot_int = cmd->device->sector_size; 909 910 sgx.tot_bytes = scsi_bufflen(cmd); 911 sgx.cur_sg = scsi_sglist(cmd); 912 sgx.sp = sp; 913 914 sg_prot = scsi_prot_sglist(cmd); 915 } else if (tc) { 916 prot_int = tc->blk_sz; 917 sgx.tot_bytes = tc->bufflen; 918 sgx.cur_sg = tc->sg; 919 sg_prot = tc->prot_sg; 920 } else { 921 BUG(); 922 return 1; 923 } 924 925 while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) { 926 927 sle_dma = sgx.dma_addr; 928 sle_dma_len = sgx.dma_len; 929 alloc_and_fill: 930 /* Allocate additional continuation packets? */ 931 if (avail_dsds == 0) { 932 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 
933 QLA_DSDS_PER_IOCB : used_dsds; 934 dsd_list_len = (avail_dsds + 1) * 12; 935 used_dsds -= avail_dsds; 936 937 /* allocate tracking DS */ 938 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); 939 if (!dsd_ptr) 940 return 1; 941 942 /* allocate new list */ 943 dsd_ptr->dsd_addr = next_dsd = 944 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, 945 &dsd_ptr->dsd_list_dma); 946 947 if (!next_dsd) { 948 /* 949 * Need to cleanup only this dsd_ptr, rest 950 * will be done by sp_free_dma() 951 */ 952 kfree(dsd_ptr); 953 return 1; 954 } 955 956 if (sp) { 957 list_add_tail(&dsd_ptr->list, 958 &sp->u.scmd.crc_ctx->dsd_list); 959 960 sp->flags |= SRB_CRC_CTX_DSD_VALID; 961 } else { 962 list_add_tail(&dsd_ptr->list, 963 &(tc->ctx->dsd_list)); 964 *tc->ctx_dsd_alloced = 1; 965 } 966 967 968 /* add new list to cmd iocb or last list */ 969 put_unaligned_le64(dsd_ptr->dsd_list_dma, 970 &cur_dsd->address); 971 cur_dsd->length = cpu_to_le32(dsd_list_len); 972 cur_dsd = next_dsd; 973 } 974 put_unaligned_le64(sle_dma, &cur_dsd->address); 975 cur_dsd->length = cpu_to_le32(sle_dma_len); 976 cur_dsd++; 977 avail_dsds--; 978 979 if (partial == 0) { 980 /* Got a full protection interval */ 981 sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len; 982 sle_dma_len = 8; 983 984 tot_prot_dma_len += sle_dma_len; 985 if (tot_prot_dma_len == sg_dma_len(sg_prot)) { 986 tot_prot_dma_len = 0; 987 sg_prot = sg_next(sg_prot); 988 } 989 990 partial = 1; /* So as to not re-enter this block */ 991 goto alloc_and_fill; 992 } 993 } 994 /* Null termination */ 995 cur_dsd->address = 0; 996 cur_dsd->length = 0; 997 cur_dsd++; 998 return 0; 999 } 1000 1001 int 1002 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, 1003 struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc) 1004 { 1005 void *next_dsd; 1006 uint8_t avail_dsds = 0; 1007 uint32_t dsd_list_len; 1008 struct dsd_dma *dsd_ptr; 1009 struct scatterlist *sg, *sgl; 1010 struct dsd64 *cur_dsd = dsd; 1011 int i; 1012 uint16_t used_dsds = tot_dsds; 1013 struct scsi_cmnd *cmd; 1014 1015 if (sp) { 1016 cmd = GET_CMD_SP(sp); 1017 sgl = scsi_sglist(cmd); 1018 } else if (tc) { 1019 sgl = tc->sg; 1020 } else { 1021 BUG(); 1022 return 1; 1023 } 1024 1025 1026 for_each_sg(sgl, sg, tot_dsds, i) { 1027 /* Allocate additional continuation packets? */ 1028 if (avail_dsds == 0) { 1029 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 
1030 QLA_DSDS_PER_IOCB : used_dsds; 1031 dsd_list_len = (avail_dsds + 1) * 12; 1032 used_dsds -= avail_dsds; 1033 1034 /* allocate tracking DS */ 1035 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); 1036 if (!dsd_ptr) 1037 return 1; 1038 1039 /* allocate new list */ 1040 dsd_ptr->dsd_addr = next_dsd = 1041 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, 1042 &dsd_ptr->dsd_list_dma); 1043 1044 if (!next_dsd) { 1045 /* 1046 * Need to cleanup only this dsd_ptr, rest 1047 * will be done by sp_free_dma() 1048 */ 1049 kfree(dsd_ptr); 1050 return 1; 1051 } 1052 1053 if (sp) { 1054 list_add_tail(&dsd_ptr->list, 1055 &sp->u.scmd.crc_ctx->dsd_list); 1056 1057 sp->flags |= SRB_CRC_CTX_DSD_VALID; 1058 } else { 1059 list_add_tail(&dsd_ptr->list, 1060 &(tc->ctx->dsd_list)); 1061 *tc->ctx_dsd_alloced = 1; 1062 } 1063 1064 /* add new list to cmd iocb or last list */ 1065 put_unaligned_le64(dsd_ptr->dsd_list_dma, 1066 &cur_dsd->address); 1067 cur_dsd->length = cpu_to_le32(dsd_list_len); 1068 cur_dsd = next_dsd; 1069 } 1070 append_dsd64(&cur_dsd, sg); 1071 avail_dsds--; 1072 1073 } 1074 /* Null termination */ 1075 cur_dsd->address = 0; 1076 cur_dsd->length = 0; 1077 cur_dsd++; 1078 return 0; 1079 } 1080 1081 int 1082 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, 1083 struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc) 1084 { 1085 struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd; 1086 struct scatterlist *sg, *sgl; 1087 struct crc_context *difctx = NULL; 1088 struct scsi_qla_host *vha; 1089 uint dsd_list_len; 1090 uint avail_dsds = 0; 1091 uint used_dsds = tot_dsds; 1092 bool dif_local_dma_alloc = false; 1093 bool direction_to_device = false; 1094 int i; 1095 1096 if (sp) { 1097 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1098 1099 sgl = scsi_prot_sglist(cmd); 1100 vha = sp->vha; 1101 difctx = sp->u.scmd.crc_ctx; 1102 direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE; 1103 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021, 1104 "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n", 1105 __func__, cmd, difctx, sp); 1106 } else if (tc) { 1107 vha = tc->vha; 1108 sgl = tc->prot_sg; 1109 difctx = tc->ctx; 1110 direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE; 1111 } else { 1112 BUG(); 1113 return 1; 1114 } 1115 1116 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021, 1117 "%s: enter (write=%u)\n", __func__, direction_to_device); 1118 1119 /* if initiator doing write or target doing read */ 1120 if (direction_to_device) { 1121 for_each_sg(sgl, sg, tot_dsds, i) { 1122 u64 sle_phys = sg_phys(sg); 1123 1124 /* If SGE addr + len flips bits in upper 32-bits */ 1125 if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) { 1126 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022, 1127 "%s: page boundary crossing (phys=%llx len=%x)\n", 1128 __func__, sle_phys, sg->length); 1129 1130 if (difctx) { 1131 ha->dif_bundle_crossed_pages++; 1132 dif_local_dma_alloc = true; 1133 } else { 1134 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, 1135 vha, 0xe022, 1136 "%s: difctx pointer is NULL\n", 1137 __func__); 1138 } 1139 break; 1140 } 1141 } 1142 ha->dif_bundle_writes++; 1143 } else { 1144 ha->dif_bundle_reads++; 1145 } 1146 1147 if (ql2xdifbundlinginternalbuffers) 1148 dif_local_dma_alloc = direction_to_device; 1149 1150 if (dif_local_dma_alloc) { 1151 u32 track_difbundl_buf = 0; 1152 u32 ldma_sg_len = 0; 1153 u8 ldma_needed = 1; 1154 1155 difctx->no_dif_bundl = 0; 1156 difctx->dif_bundl_len = 0; 1157 1158 /* Track DSD buffers */ 1159 INIT_LIST_HEAD(&difctx->ldif_dsd_list); 1160 /* Track local DMA buffers 
*/ 1161 INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list); 1162 1163 for_each_sg(sgl, sg, tot_dsds, i) { 1164 u32 sglen = sg_dma_len(sg); 1165 1166 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023, 1167 "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n", 1168 __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len, 1169 difctx->dif_bundl_len, ldma_needed); 1170 1171 while (sglen) { 1172 u32 xfrlen = 0; 1173 1174 if (ldma_needed) { 1175 /* 1176 * Allocate list item to store 1177 * the DMA buffers 1178 */ 1179 dsd_ptr = kzalloc(sizeof(*dsd_ptr), 1180 GFP_ATOMIC); 1181 if (!dsd_ptr) { 1182 ql_dbg(ql_dbg_tgt, vha, 0xe024, 1183 "%s: failed alloc dsd_ptr\n", 1184 __func__); 1185 return 1; 1186 } 1187 ha->dif_bundle_kallocs++; 1188 1189 /* allocate dma buffer */ 1190 dsd_ptr->dsd_addr = dma_pool_alloc 1191 (ha->dif_bundl_pool, GFP_ATOMIC, 1192 &dsd_ptr->dsd_list_dma); 1193 if (!dsd_ptr->dsd_addr) { 1194 ql_dbg(ql_dbg_tgt, vha, 0xe024, 1195 "%s: failed alloc ->dsd_ptr\n", 1196 __func__); 1197 /* 1198 * need to cleanup only this 1199 * dsd_ptr rest will be done 1200 * by sp_free_dma() 1201 */ 1202 kfree(dsd_ptr); 1203 ha->dif_bundle_kallocs--; 1204 return 1; 1205 } 1206 ha->dif_bundle_dma_allocs++; 1207 ldma_needed = 0; 1208 difctx->no_dif_bundl++; 1209 list_add_tail(&dsd_ptr->list, 1210 &difctx->ldif_dma_hndl_list); 1211 } 1212 1213 /* xfrlen is min of dma pool size and sglen */ 1214 xfrlen = (sglen > 1215 (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ? 1216 DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len : 1217 sglen; 1218 1219 /* replace with local allocated dma buffer */ 1220 sg_pcopy_to_buffer(sgl, sg_nents(sgl), 1221 dsd_ptr->dsd_addr + ldma_sg_len, xfrlen, 1222 difctx->dif_bundl_len); 1223 difctx->dif_bundl_len += xfrlen; 1224 sglen -= xfrlen; 1225 ldma_sg_len += xfrlen; 1226 if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE || 1227 sg_is_last(sg)) { 1228 ldma_needed = 1; 1229 ldma_sg_len = 0; 1230 } 1231 } 1232 } 1233 1234 track_difbundl_buf = used_dsds = difctx->no_dif_bundl; 1235 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025, 1236 "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n", 1237 difctx->dif_bundl_len, difctx->no_dif_bundl, 1238 track_difbundl_buf); 1239 1240 if (sp) 1241 sp->flags |= SRB_DIF_BUNDL_DMA_VALID; 1242 else 1243 tc->prot_flags = DIF_BUNDL_DMA_VALID; 1244 1245 list_for_each_entry_safe(dif_dsd, nxt_dsd, 1246 &difctx->ldif_dma_hndl_list, list) { 1247 u32 sglen = (difctx->dif_bundl_len > 1248 DIF_BUNDLING_DMA_POOL_SIZE) ? 1249 DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len; 1250 1251 BUG_ON(track_difbundl_buf == 0); 1252 1253 /* Allocate additional continuation packets? */ 1254 if (avail_dsds == 0) { 1255 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 1256 0xe024, 1257 "%s: adding continuation iocb's\n", 1258 __func__); 1259 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 
1260 QLA_DSDS_PER_IOCB : used_dsds; 1261 dsd_list_len = (avail_dsds + 1) * 12; 1262 used_dsds -= avail_dsds; 1263 1264 /* allocate tracking DS */ 1265 dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC); 1266 if (!dsd_ptr) { 1267 ql_dbg(ql_dbg_tgt, vha, 0xe026, 1268 "%s: failed alloc dsd_ptr\n", 1269 __func__); 1270 return 1; 1271 } 1272 ha->dif_bundle_kallocs++; 1273 1274 difctx->no_ldif_dsd++; 1275 /* allocate new list */ 1276 dsd_ptr->dsd_addr = 1277 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, 1278 &dsd_ptr->dsd_list_dma); 1279 if (!dsd_ptr->dsd_addr) { 1280 ql_dbg(ql_dbg_tgt, vha, 0xe026, 1281 "%s: failed alloc ->dsd_addr\n", 1282 __func__); 1283 /* 1284 * need to cleanup only this dsd_ptr 1285 * rest will be done by sp_free_dma() 1286 */ 1287 kfree(dsd_ptr); 1288 ha->dif_bundle_kallocs--; 1289 return 1; 1290 } 1291 ha->dif_bundle_dma_allocs++; 1292 1293 if (sp) { 1294 list_add_tail(&dsd_ptr->list, 1295 &difctx->ldif_dsd_list); 1296 sp->flags |= SRB_CRC_CTX_DSD_VALID; 1297 } else { 1298 list_add_tail(&dsd_ptr->list, 1299 &difctx->ldif_dsd_list); 1300 tc->ctx_dsd_alloced = 1; 1301 } 1302 1303 /* add new list to cmd iocb or last list */ 1304 put_unaligned_le64(dsd_ptr->dsd_list_dma, 1305 &cur_dsd->address); 1306 cur_dsd->length = cpu_to_le32(dsd_list_len); 1307 cur_dsd = dsd_ptr->dsd_addr; 1308 } 1309 put_unaligned_le64(dif_dsd->dsd_list_dma, 1310 &cur_dsd->address); 1311 cur_dsd->length = cpu_to_le32(sglen); 1312 cur_dsd++; 1313 avail_dsds--; 1314 difctx->dif_bundl_len -= sglen; 1315 track_difbundl_buf--; 1316 } 1317 1318 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026, 1319 "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__, 1320 difctx->no_ldif_dsd, difctx->no_dif_bundl); 1321 } else { 1322 for_each_sg(sgl, sg, tot_dsds, i) { 1323 /* Allocate additional continuation packets? */ 1324 if (avail_dsds == 0) { 1325 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 1326 QLA_DSDS_PER_IOCB : used_dsds; 1327 dsd_list_len = (avail_dsds + 1) * 12; 1328 used_dsds -= avail_dsds; 1329 1330 /* allocate tracking DS */ 1331 dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC); 1332 if (!dsd_ptr) { 1333 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, 1334 vha, 0xe027, 1335 "%s: failed alloc dsd_dma...\n", 1336 __func__); 1337 return 1; 1338 } 1339 1340 /* allocate new list */ 1341 dsd_ptr->dsd_addr = 1342 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, 1343 &dsd_ptr->dsd_list_dma); 1344 if (!dsd_ptr->dsd_addr) { 1345 /* need to cleanup only this dsd_ptr */ 1346 /* rest will be done by sp_free_dma() */ 1347 kfree(dsd_ptr); 1348 return 1; 1349 } 1350 1351 if (sp) { 1352 list_add_tail(&dsd_ptr->list, 1353 &difctx->dsd_list); 1354 sp->flags |= SRB_CRC_CTX_DSD_VALID; 1355 } else { 1356 list_add_tail(&dsd_ptr->list, 1357 &difctx->dsd_list); 1358 tc->ctx_dsd_alloced = 1; 1359 } 1360 1361 /* add new list to cmd iocb or last list */ 1362 put_unaligned_le64(dsd_ptr->dsd_list_dma, 1363 &cur_dsd->address); 1364 cur_dsd->length = cpu_to_le32(dsd_list_len); 1365 cur_dsd = dsd_ptr->dsd_addr; 1366 } 1367 append_dsd64(&cur_dsd, sg); 1368 avail_dsds--; 1369 } 1370 } 1371 /* Null termination */ 1372 cur_dsd->address = 0; 1373 cur_dsd->length = 0; 1374 cur_dsd++; 1375 return 0; 1376 } 1377 1378 /** 1379 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command 1380 * Type 6 IOCB types. 
1381 * 1382 * @sp: SRB command to process 1383 * @cmd_pkt: Command type 3 IOCB 1384 * @tot_dsds: Total number of segments to transfer 1385 * @tot_prot_dsds: Total number of segments with protection information 1386 * @fw_prot_opts: Protection options to be passed to firmware 1387 */ 1388 static inline int 1389 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, 1390 uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts) 1391 { 1392 struct dsd64 *cur_dsd; 1393 __be32 *fcp_dl; 1394 scsi_qla_host_t *vha; 1395 struct scsi_cmnd *cmd; 1396 uint32_t total_bytes = 0; 1397 uint32_t data_bytes; 1398 uint32_t dif_bytes; 1399 uint8_t bundling = 1; 1400 uint16_t blk_size; 1401 struct crc_context *crc_ctx_pkt = NULL; 1402 struct qla_hw_data *ha; 1403 uint8_t additional_fcpcdb_len; 1404 uint16_t fcp_cmnd_len; 1405 struct fcp_cmnd *fcp_cmnd; 1406 dma_addr_t crc_ctx_dma; 1407 1408 cmd = GET_CMD_SP(sp); 1409 1410 /* Update entry type to indicate Command Type CRC_2 IOCB */ 1411 put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type); 1412 1413 vha = sp->vha; 1414 ha = vha->hw; 1415 1416 /* No data transfer */ 1417 data_bytes = scsi_bufflen(cmd); 1418 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) { 1419 cmd_pkt->byte_count = cpu_to_le32(0); 1420 return QLA_SUCCESS; 1421 } 1422 1423 cmd_pkt->vp_index = sp->vha->vp_idx; 1424 1425 /* Set transfer direction */ 1426 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 1427 cmd_pkt->control_flags = 1428 cpu_to_le16(CF_WRITE_DATA); 1429 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 1430 cmd_pkt->control_flags = 1431 cpu_to_le16(CF_READ_DATA); 1432 } 1433 1434 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || 1435 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) || 1436 (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) || 1437 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT)) 1438 bundling = 0; 1439 1440 /* Allocate CRC context from global pool */ 1441 crc_ctx_pkt = sp->u.scmd.crc_ctx = 1442 dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma); 1443 1444 if (!crc_ctx_pkt) 1445 goto crc_queuing_error; 1446 1447 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma; 1448 1449 sp->flags |= SRB_CRC_CTX_DMA_VALID; 1450 1451 /* Set handle */ 1452 crc_ctx_pkt->handle = cmd_pkt->handle; 1453 1454 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list); 1455 1456 qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *) 1457 &crc_ctx_pkt->ref_tag, tot_prot_dsds); 1458 1459 put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address); 1460 cmd_pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW); 1461 1462 /* Determine SCSI command length -- align to 4 byte boundary */ 1463 if (cmd->cmd_len > 16) { 1464 additional_fcpcdb_len = cmd->cmd_len - 16; 1465 if ((cmd->cmd_len % 4) != 0) { 1466 /* SCSI cmd > 16 bytes must be multiple of 4 */ 1467 goto crc_queuing_error; 1468 } 1469 fcp_cmnd_len = 12 + cmd->cmd_len + 4; 1470 } else { 1471 additional_fcpcdb_len = 0; 1472 fcp_cmnd_len = 12 + 16 + 4; 1473 } 1474 1475 fcp_cmnd = &crc_ctx_pkt->fcp_cmnd; 1476 1477 fcp_cmnd->additional_cdb_len = additional_fcpcdb_len; 1478 if (cmd->sc_data_direction == DMA_TO_DEVICE) 1479 fcp_cmnd->additional_cdb_len |= 1; 1480 else if (cmd->sc_data_direction == DMA_FROM_DEVICE) 1481 fcp_cmnd->additional_cdb_len |= 2; 1482 1483 int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun); 1484 memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); 1485 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len); 1486 put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF, 1487 
	    &cmd_pkt->fcp_cmnd_dseg_address);
	fcp_cmnd->task_management = 0;
	fcp_cmnd->task_attribute = TSK_SIMPLE;

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	default:
		BUG();
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
			SCSI_PROT_DIF_TYPE2))
			fw_prot_opts |= BIT_10;
		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
		    SCSI_PROT_DIF_TYPE3)
			fw_prot_opts |= BIT_11;
	}

	if (!bundling) {
		cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
			tot_prot_dsds);
		cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (__be32 *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
		    cur_dsd, tot_dsds, NULL))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
	    (tot_dsds - tot_prot_dsds), NULL))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
		    tot_prot_dsds, NULL))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
1590 */ 1591 int 1592 qla24xx_start_scsi(srb_t *sp) 1593 { 1594 int nseg; 1595 unsigned long flags; 1596 uint32_t *clr_ptr; 1597 uint32_t handle; 1598 struct cmd_type_7 *cmd_pkt; 1599 uint16_t cnt; 1600 uint16_t req_cnt; 1601 uint16_t tot_dsds; 1602 struct req_que *req = NULL; 1603 struct rsp_que *rsp; 1604 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1605 struct scsi_qla_host *vha = sp->vha; 1606 struct qla_hw_data *ha = vha->hw; 1607 1608 /* Setup device pointers. */ 1609 req = vha->req; 1610 rsp = req->rsp; 1611 1612 /* So we know we haven't pci_map'ed anything yet */ 1613 tot_dsds = 0; 1614 1615 /* Send marker if required */ 1616 if (vha->marker_needed != 0) { 1617 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) != 1618 QLA_SUCCESS) 1619 return QLA_FUNCTION_FAILED; 1620 vha->marker_needed = 0; 1621 } 1622 1623 /* Acquire ring specific lock */ 1624 spin_lock_irqsave(&ha->hardware_lock, flags); 1625 1626 handle = qla2xxx_get_next_handle(req); 1627 if (handle == 0) 1628 goto queuing_error; 1629 1630 /* Map the sg table so we have an accurate count of sg entries needed */ 1631 if (scsi_sg_count(cmd)) { 1632 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), 1633 scsi_sg_count(cmd), cmd->sc_data_direction); 1634 if (unlikely(!nseg)) 1635 goto queuing_error; 1636 } else 1637 nseg = 0; 1638 1639 tot_dsds = nseg; 1640 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); 1641 1642 sp->iores.res_type = RESOURCE_INI; 1643 sp->iores.iocb_cnt = req_cnt; 1644 if (qla_get_iocbs(sp->qpair, &sp->iores)) 1645 goto queuing_error; 1646 1647 if (req->cnt < (req_cnt + 2)) { 1648 if (IS_SHADOW_REG_CAPABLE(ha)) { 1649 cnt = *req->out_ptr; 1650 } else { 1651 cnt = rd_reg_dword_relaxed(req->req_q_out); 1652 if (qla2x00_check_reg16_for_disconnect(vha, cnt)) 1653 goto queuing_error; 1654 } 1655 1656 if (req->ring_index < cnt) 1657 req->cnt = cnt - req->ring_index; 1658 else 1659 req->cnt = req->length - 1660 (req->ring_index - cnt); 1661 if (req->cnt < (req_cnt + 2)) 1662 goto queuing_error; 1663 } 1664 1665 /* Build command packet. */ 1666 req->current_outstanding_cmd = handle; 1667 req->outstanding_cmds[handle] = sp; 1668 sp->handle = handle; 1669 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 1670 req->cnt -= req_cnt; 1671 1672 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; 1673 cmd_pkt->handle = make_handle(req->id, handle); 1674 1675 /* Zero out remaining portion of packet. */ 1676 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ 1677 clr_ptr = (uint32_t *)cmd_pkt + 2; 1678 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 1679 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 1680 1681 /* Set NPORT-ID and LUN number*/ 1682 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 1683 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 1684 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 1685 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 1686 cmd_pkt->vp_index = sp->vha->vp_idx; 1687 1688 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 1689 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 1690 1691 cmd_pkt->task = TSK_SIMPLE; 1692 1693 /* Load SCSI command packet. */ 1694 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); 1695 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); 1696 1697 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 1698 1699 /* Build IOCB segments */ 1700 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req); 1701 1702 /* Set total data segment count. 
*/ 1703 cmd_pkt->entry_count = (uint8_t)req_cnt; 1704 wmb(); 1705 /* Adjust ring index. */ 1706 req->ring_index++; 1707 if (req->ring_index == req->length) { 1708 req->ring_index = 0; 1709 req->ring_ptr = req->ring; 1710 } else 1711 req->ring_ptr++; 1712 1713 sp->flags |= SRB_DMA_VALID; 1714 1715 /* Set chip new ring index. */ 1716 wrt_reg_dword(req->req_q_in, req->ring_index); 1717 1718 /* Manage unprocessed RIO/ZIO commands in response queue. */ 1719 if (vha->flags.process_response_queue && 1720 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 1721 qla24xx_process_response_queue(vha, rsp); 1722 1723 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1724 return QLA_SUCCESS; 1725 1726 queuing_error: 1727 if (tot_dsds) 1728 scsi_dma_unmap(cmd); 1729 1730 qla_put_iocbs(sp->qpair, &sp->iores); 1731 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1732 1733 return QLA_FUNCTION_FAILED; 1734 } 1735 1736 /** 1737 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP 1738 * @sp: command to send to the ISP 1739 * 1740 * Returns non-zero if a failure occurred, else zero. 1741 */ 1742 int 1743 qla24xx_dif_start_scsi(srb_t *sp) 1744 { 1745 int nseg; 1746 unsigned long flags; 1747 uint32_t *clr_ptr; 1748 uint32_t handle; 1749 uint16_t cnt; 1750 uint16_t req_cnt = 0; 1751 uint16_t tot_dsds; 1752 uint16_t tot_prot_dsds; 1753 uint16_t fw_prot_opts = 0; 1754 struct req_que *req = NULL; 1755 struct rsp_que *rsp = NULL; 1756 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1757 struct scsi_qla_host *vha = sp->vha; 1758 struct qla_hw_data *ha = vha->hw; 1759 struct cmd_type_crc_2 *cmd_pkt; 1760 uint32_t status = 0; 1761 1762 #define QDSS_GOT_Q_SPACE BIT_0 1763 1764 /* Only process protection or >16 cdb in this routine */ 1765 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) { 1766 if (cmd->cmd_len <= 16) 1767 return qla24xx_start_scsi(sp); 1768 } 1769 1770 /* Setup device pointers. 
*/ 1771 req = vha->req; 1772 rsp = req->rsp; 1773 1774 /* So we know we haven't pci_map'ed anything yet */ 1775 tot_dsds = 0; 1776 1777 /* Send marker if required */ 1778 if (vha->marker_needed != 0) { 1779 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) != 1780 QLA_SUCCESS) 1781 return QLA_FUNCTION_FAILED; 1782 vha->marker_needed = 0; 1783 } 1784 1785 /* Acquire ring specific lock */ 1786 spin_lock_irqsave(&ha->hardware_lock, flags); 1787 1788 handle = qla2xxx_get_next_handle(req); 1789 if (handle == 0) 1790 goto queuing_error; 1791 1792 /* Compute number of required data segments */ 1793 /* Map the sg table so we have an accurate count of sg entries needed */ 1794 if (scsi_sg_count(cmd)) { 1795 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), 1796 scsi_sg_count(cmd), cmd->sc_data_direction); 1797 if (unlikely(!nseg)) 1798 goto queuing_error; 1799 else 1800 sp->flags |= SRB_DMA_VALID; 1801 1802 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || 1803 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { 1804 struct qla2_sgx sgx; 1805 uint32_t partial; 1806 1807 memset(&sgx, 0, sizeof(struct qla2_sgx)); 1808 sgx.tot_bytes = scsi_bufflen(cmd); 1809 sgx.cur_sg = scsi_sglist(cmd); 1810 sgx.sp = sp; 1811 1812 nseg = 0; 1813 while (qla24xx_get_one_block_sg( 1814 cmd->device->sector_size, &sgx, &partial)) 1815 nseg++; 1816 } 1817 } else 1818 nseg = 0; 1819 1820 /* number of required data segments */ 1821 tot_dsds = nseg; 1822 1823 /* Compute number of required protection segments */ 1824 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) { 1825 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), 1826 scsi_prot_sg_count(cmd), cmd->sc_data_direction); 1827 if (unlikely(!nseg)) 1828 goto queuing_error; 1829 else 1830 sp->flags |= SRB_CRC_PROT_DMA_VALID; 1831 1832 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || 1833 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { 1834 nseg = scsi_bufflen(cmd) / cmd->device->sector_size; 1835 } 1836 } else { 1837 nseg = 0; 1838 } 1839 1840 req_cnt = 1; 1841 /* Total Data and protection sg segment(s) */ 1842 tot_prot_dsds = nseg; 1843 tot_dsds += nseg; 1844 1845 sp->iores.res_type = RESOURCE_INI; 1846 sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds); 1847 if (qla_get_iocbs(sp->qpair, &sp->iores)) 1848 goto queuing_error; 1849 1850 if (req->cnt < (req_cnt + 2)) { 1851 if (IS_SHADOW_REG_CAPABLE(ha)) { 1852 cnt = *req->out_ptr; 1853 } else { 1854 cnt = rd_reg_dword_relaxed(req->req_q_out); 1855 if (qla2x00_check_reg16_for_disconnect(vha, cnt)) 1856 goto queuing_error; 1857 } 1858 if (req->ring_index < cnt) 1859 req->cnt = cnt - req->ring_index; 1860 else 1861 req->cnt = req->length - 1862 (req->ring_index - cnt); 1863 if (req->cnt < (req_cnt + 2)) 1864 goto queuing_error; 1865 } 1866 1867 status |= QDSS_GOT_Q_SPACE; 1868 1869 /* Build header part of command packet (excluding the OPCODE). 
*/ 1870 req->current_outstanding_cmd = handle; 1871 req->outstanding_cmds[handle] = sp; 1872 sp->handle = handle; 1873 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 1874 req->cnt -= req_cnt; 1875 1876 /* Fill-in common area */ 1877 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr; 1878 cmd_pkt->handle = make_handle(req->id, handle); 1879 1880 clr_ptr = (uint32_t *)cmd_pkt + 2; 1881 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 1882 1883 /* Set NPORT-ID and LUN number*/ 1884 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 1885 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 1886 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 1887 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 1888 1889 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 1890 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 1891 1892 /* Total Data and protection segment(s) */ 1893 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 1894 1895 /* Build IOCB segments and adjust for data protection segments */ 1896 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *) 1897 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) != 1898 QLA_SUCCESS) 1899 goto queuing_error; 1900 1901 cmd_pkt->entry_count = (uint8_t)req_cnt; 1902 /* Specify response queue number where completion should happen */ 1903 cmd_pkt->entry_status = (uint8_t) rsp->id; 1904 cmd_pkt->timeout = cpu_to_le16(0); 1905 wmb(); 1906 1907 /* Adjust ring index. */ 1908 req->ring_index++; 1909 if (req->ring_index == req->length) { 1910 req->ring_index = 0; 1911 req->ring_ptr = req->ring; 1912 } else 1913 req->ring_ptr++; 1914 1915 /* Set chip new ring index. */ 1916 wrt_reg_dword(req->req_q_in, req->ring_index); 1917 1918 /* Manage unprocessed RIO/ZIO commands in response queue. */ 1919 if (vha->flags.process_response_queue && 1920 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 1921 qla24xx_process_response_queue(vha, rsp); 1922 1923 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1924 1925 return QLA_SUCCESS; 1926 1927 queuing_error: 1928 if (status & QDSS_GOT_Q_SPACE) { 1929 req->outstanding_cmds[handle] = NULL; 1930 req->cnt += req_cnt; 1931 } 1932 /* Cleanup will be performed by the caller (queuecommand) */ 1933 1934 qla_put_iocbs(sp->qpair, &sp->iores); 1935 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1936 1937 return QLA_FUNCTION_FAILED; 1938 } 1939 1940 /** 1941 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP 1942 * @sp: command to send to the ISP 1943 * 1944 * Returns non-zero if a failure occurred, else zero. 
1945 */ 1946 static int 1947 qla2xxx_start_scsi_mq(srb_t *sp) 1948 { 1949 int nseg; 1950 unsigned long flags; 1951 uint32_t *clr_ptr; 1952 uint32_t handle; 1953 struct cmd_type_7 *cmd_pkt; 1954 uint16_t cnt; 1955 uint16_t req_cnt; 1956 uint16_t tot_dsds; 1957 struct req_que *req = NULL; 1958 struct rsp_que *rsp; 1959 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1960 struct scsi_qla_host *vha = sp->fcport->vha; 1961 struct qla_hw_data *ha = vha->hw; 1962 struct qla_qpair *qpair = sp->qpair; 1963 1964 /* Acquire qpair specific lock */ 1965 spin_lock_irqsave(&qpair->qp_lock, flags); 1966 1967 /* Setup qpair pointers */ 1968 req = qpair->req; 1969 rsp = qpair->rsp; 1970 1971 /* So we know we haven't pci_map'ed anything yet */ 1972 tot_dsds = 0; 1973 1974 /* Send marker if required */ 1975 if (vha->marker_needed != 0) { 1976 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) != 1977 QLA_SUCCESS) { 1978 spin_unlock_irqrestore(&qpair->qp_lock, flags); 1979 return QLA_FUNCTION_FAILED; 1980 } 1981 vha->marker_needed = 0; 1982 } 1983 1984 handle = qla2xxx_get_next_handle(req); 1985 if (handle == 0) 1986 goto queuing_error; 1987 1988 /* Map the sg table so we have an accurate count of sg entries needed */ 1989 if (scsi_sg_count(cmd)) { 1990 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), 1991 scsi_sg_count(cmd), cmd->sc_data_direction); 1992 if (unlikely(!nseg)) 1993 goto queuing_error; 1994 } else 1995 nseg = 0; 1996 1997 tot_dsds = nseg; 1998 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); 1999 2000 sp->iores.res_type = RESOURCE_INI; 2001 sp->iores.iocb_cnt = req_cnt; 2002 if (qla_get_iocbs(sp->qpair, &sp->iores)) 2003 goto queuing_error; 2004 2005 if (req->cnt < (req_cnt + 2)) { 2006 if (IS_SHADOW_REG_CAPABLE(ha)) { 2007 cnt = *req->out_ptr; 2008 } else { 2009 cnt = rd_reg_dword_relaxed(req->req_q_out); 2010 if (qla2x00_check_reg16_for_disconnect(vha, cnt)) 2011 goto queuing_error; 2012 } 2013 2014 if (req->ring_index < cnt) 2015 req->cnt = cnt - req->ring_index; 2016 else 2017 req->cnt = req->length - 2018 (req->ring_index - cnt); 2019 if (req->cnt < (req_cnt + 2)) 2020 goto queuing_error; 2021 } 2022 2023 /* Build command packet. */ 2024 req->current_outstanding_cmd = handle; 2025 req->outstanding_cmds[handle] = sp; 2026 sp->handle = handle; 2027 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 2028 req->cnt -= req_cnt; 2029 2030 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; 2031 cmd_pkt->handle = make_handle(req->id, handle); 2032 2033 /* Zero out remaining portion of packet. */ 2034 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ 2035 clr_ptr = (uint32_t *)cmd_pkt + 2; 2036 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 2037 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 2038 2039 /* Set NPORT-ID and LUN number*/ 2040 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2041 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 2042 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 2043 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 2044 cmd_pkt->vp_index = sp->fcport->vha->vp_idx; 2045 2046 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 2047 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 2048 2049 cmd_pkt->task = TSK_SIMPLE; 2050 2051 /* Load SCSI command packet. 
*/ 2052 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); 2053 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); 2054 2055 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 2056 2057 /* Build IOCB segments */ 2058 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req); 2059 2060 /* Set total data segment count. */ 2061 cmd_pkt->entry_count = (uint8_t)req_cnt; 2062 wmb(); 2063 /* Adjust ring index. */ 2064 req->ring_index++; 2065 if (req->ring_index == req->length) { 2066 req->ring_index = 0; 2067 req->ring_ptr = req->ring; 2068 } else 2069 req->ring_ptr++; 2070 2071 sp->flags |= SRB_DMA_VALID; 2072 2073 /* Set chip new ring index. */ 2074 wrt_reg_dword(req->req_q_in, req->ring_index); 2075 2076 /* Manage unprocessed RIO/ZIO commands in response queue. */ 2077 if (vha->flags.process_response_queue && 2078 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 2079 qla24xx_process_response_queue(vha, rsp); 2080 2081 spin_unlock_irqrestore(&qpair->qp_lock, flags); 2082 return QLA_SUCCESS; 2083 2084 queuing_error: 2085 if (tot_dsds) 2086 scsi_dma_unmap(cmd); 2087 2088 qla_put_iocbs(sp->qpair, &sp->iores); 2089 spin_unlock_irqrestore(&qpair->qp_lock, flags); 2090 2091 return QLA_FUNCTION_FAILED; 2092 } 2093 2094 2095 /** 2096 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP 2097 * @sp: command to send to the ISP 2098 * 2099 * Returns non-zero if a failure occurred, else zero. 2100 */ 2101 int 2102 qla2xxx_dif_start_scsi_mq(srb_t *sp) 2103 { 2104 int nseg; 2105 unsigned long flags; 2106 uint32_t *clr_ptr; 2107 uint32_t handle; 2108 uint16_t cnt; 2109 uint16_t req_cnt = 0; 2110 uint16_t tot_dsds; 2111 uint16_t tot_prot_dsds; 2112 uint16_t fw_prot_opts = 0; 2113 struct req_que *req = NULL; 2114 struct rsp_que *rsp = NULL; 2115 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 2116 struct scsi_qla_host *vha = sp->fcport->vha; 2117 struct qla_hw_data *ha = vha->hw; 2118 struct cmd_type_crc_2 *cmd_pkt; 2119 uint32_t status = 0; 2120 struct qla_qpair *qpair = sp->qpair; 2121 2122 #define QDSS_GOT_Q_SPACE BIT_0 2123 2124 /* Check for host side state */ 2125 if (!qpair->online) { 2126 cmd->result = DID_NO_CONNECT << 16; 2127 return QLA_INTERFACE_ERROR; 2128 } 2129 2130 if (!qpair->difdix_supported && 2131 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { 2132 cmd->result = DID_NO_CONNECT << 16; 2133 return QLA_INTERFACE_ERROR; 2134 } 2135 2136 /* Only process protection or >16 cdb in this routine */ 2137 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) { 2138 if (cmd->cmd_len <= 16) 2139 return qla2xxx_start_scsi_mq(sp); 2140 } 2141 2142 spin_lock_irqsave(&qpair->qp_lock, flags); 2143 2144 /* Setup qpair pointers */ 2145 rsp = qpair->rsp; 2146 req = qpair->req; 2147 2148 /* So we know we haven't pci_map'ed anything yet */ 2149 tot_dsds = 0; 2150 2151 /* Send marker if required */ 2152 if (vha->marker_needed != 0) { 2153 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) != 2154 QLA_SUCCESS) { 2155 spin_unlock_irqrestore(&qpair->qp_lock, flags); 2156 return QLA_FUNCTION_FAILED; 2157 } 2158 vha->marker_needed = 0; 2159 } 2160 2161 handle = qla2xxx_get_next_handle(req); 2162 if (handle == 0) 2163 goto queuing_error; 2164 2165 /* Compute number of required data segments */ 2166 /* Map the sg table so we have an accurate count of sg entries needed */ 2167 if (scsi_sg_count(cmd)) { 2168 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), 2169 scsi_sg_count(cmd), cmd->sc_data_direction); 2170 if (unlikely(!nseg)) 2171 goto queuing_error; 2172 else 2173 sp->flags |= SRB_DMA_VALID; 2174 2175 if 
((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || 2176 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { 2177 struct qla2_sgx sgx; 2178 uint32_t partial; 2179 2180 memset(&sgx, 0, sizeof(struct qla2_sgx)); 2181 sgx.tot_bytes = scsi_bufflen(cmd); 2182 sgx.cur_sg = scsi_sglist(cmd); 2183 sgx.sp = sp; 2184 2185 nseg = 0; 2186 while (qla24xx_get_one_block_sg( 2187 cmd->device->sector_size, &sgx, &partial)) 2188 nseg++; 2189 } 2190 } else 2191 nseg = 0; 2192 2193 /* number of required data segments */ 2194 tot_dsds = nseg; 2195 2196 /* Compute number of required protection segments */ 2197 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) { 2198 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), 2199 scsi_prot_sg_count(cmd), cmd->sc_data_direction); 2200 if (unlikely(!nseg)) 2201 goto queuing_error; 2202 else 2203 sp->flags |= SRB_CRC_PROT_DMA_VALID; 2204 2205 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || 2206 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { 2207 nseg = scsi_bufflen(cmd) / cmd->device->sector_size; 2208 } 2209 } else { 2210 nseg = 0; 2211 } 2212 2213 req_cnt = 1; 2214 /* Total Data and protection sg segment(s) */ 2215 tot_prot_dsds = nseg; 2216 tot_dsds += nseg; 2217 2218 sp->iores.res_type = RESOURCE_INI; 2219 sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds); 2220 if (qla_get_iocbs(sp->qpair, &sp->iores)) 2221 goto queuing_error; 2222 2223 if (req->cnt < (req_cnt + 2)) { 2224 if (IS_SHADOW_REG_CAPABLE(ha)) { 2225 cnt = *req->out_ptr; 2226 } else { 2227 cnt = rd_reg_dword_relaxed(req->req_q_out); 2228 if (qla2x00_check_reg16_for_disconnect(vha, cnt)) 2229 goto queuing_error; 2230 } 2231 2232 if (req->ring_index < cnt) 2233 req->cnt = cnt - req->ring_index; 2234 else 2235 req->cnt = req->length - 2236 (req->ring_index - cnt); 2237 if (req->cnt < (req_cnt + 2)) 2238 goto queuing_error; 2239 } 2240 2241 status |= QDSS_GOT_Q_SPACE; 2242 2243 /* Build header part of command packet (excluding the OPCODE). */ 2244 req->current_outstanding_cmd = handle; 2245 req->outstanding_cmds[handle] = sp; 2246 sp->handle = handle; 2247 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 2248 req->cnt -= req_cnt; 2249 2250 /* Fill-in common area */ 2251 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr; 2252 cmd_pkt->handle = make_handle(req->id, handle); 2253 2254 clr_ptr = (uint32_t *)cmd_pkt + 2; 2255 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 2256 2257 /* Set NPORT-ID and LUN number*/ 2258 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2259 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 2260 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 2261 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 2262 2263 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 2264 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 2265 2266 /* Total Data and protection segment(s) */ 2267 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 2268 2269 /* Build IOCB segments and adjust for data protection segments */ 2270 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *) 2271 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) != 2272 QLA_SUCCESS) 2273 goto queuing_error; 2274 2275 cmd_pkt->entry_count = (uint8_t)req_cnt; 2276 cmd_pkt->timeout = cpu_to_le16(0); 2277 wmb(); 2278 2279 /* Adjust ring index. */ 2280 req->ring_index++; 2281 if (req->ring_index == req->length) { 2282 req->ring_index = 0; 2283 req->ring_ptr = req->ring; 2284 } else 2285 req->ring_ptr++; 2286 2287 /* Set chip new ring index. 
 */
	wrt_reg_dword(req->req_q_in, req->ring_index);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	qla_put_iocbs(sp->qpair, &sp->iores);
	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_FUNCTION_FAILED;
}

/* Generic Control-SRB manipulation functions. */

/* hardware_lock assumed to be held. */

void *
__qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
{
	scsi_qla_host_t *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = qpair->req;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
	uint32_t handle;
	request_t *pkt;
	uint16_t cnt, req_cnt;

	pkt = NULL;
	req_cnt = 1;
	handle = 0;

	if (sp && (sp->type != SRB_SCSI_CMD)) {
		/* Adjust entry-counts as needed. */
		req_cnt = sp->iocbs;
	}

	/* Check for room on request queue. */
	if (req->cnt < req_cnt + 2) {
		if (qpair->use_shadow_reg)
			cnt = *req->out_ptr;
		else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
		    IS_QLA28XX(ha))
			cnt = rd_reg_dword(&reg->isp25mq.req_q_out);
		else if (IS_P3P_TYPE(ha))
			cnt = rd_reg_dword(reg->isp82.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = rd_reg_dword(&reg->isp24.req_q_out);
		else if (IS_QLAFX00(ha))
			cnt = rd_reg_dword(&reg->ispfx00.req_q_out);
		else
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));

		if (!qpair->use_shadow_reg && cnt == ISP_REG16_DISCONNECT) {
			qla_schedule_eeh_work(vha);
			return NULL;
		}

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
	}
	if (req->cnt < req_cnt + 2)
		goto queuing_error;

	if (sp) {
		handle = qla2xxx_get_next_handle(req);
		if (handle == 0) {
			ql_log(ql_log_warn, vha, 0x700b,
			    "No room on outstanding cmd array.\n");
			goto queuing_error;
		}

		/* Prep command array.
*/ 2376 req->current_outstanding_cmd = handle; 2377 req->outstanding_cmds[handle] = sp; 2378 sp->handle = handle; 2379 } 2380 2381 /* Prep packet */ 2382 req->cnt -= req_cnt; 2383 pkt = req->ring_ptr; 2384 memset(pkt, 0, REQUEST_ENTRY_SIZE); 2385 if (IS_QLAFX00(ha)) { 2386 wrt_reg_byte((u8 __force __iomem *)&pkt->entry_count, req_cnt); 2387 wrt_reg_dword((__le32 __force __iomem *)&pkt->handle, handle); 2388 } else { 2389 pkt->entry_count = req_cnt; 2390 pkt->handle = handle; 2391 } 2392 2393 return pkt; 2394 2395 queuing_error: 2396 qpair->tgt_counters.num_alloc_iocb_failed++; 2397 return pkt; 2398 } 2399 2400 void * 2401 qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp) 2402 { 2403 scsi_qla_host_t *vha = qpair->vha; 2404 2405 if (qla2x00_reset_active(vha)) 2406 return NULL; 2407 2408 return __qla2x00_alloc_iocbs(qpair, sp); 2409 } 2410 2411 void * 2412 qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp) 2413 { 2414 return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp); 2415 } 2416 2417 static void 2418 qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio) 2419 { 2420 struct srb_iocb *lio = &sp->u.iocb_cmd; 2421 2422 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2423 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI); 2424 if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) { 2425 logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI); 2426 if (sp->vha->flags.nvme_first_burst) 2427 logio->io_parameter[0] = 2428 cpu_to_le32(NVME_PRLI_SP_FIRST_BURST); 2429 if (sp->vha->flags.nvme2_enabled) { 2430 /* Set service parameter BIT_7 for NVME CONF support */ 2431 logio->io_parameter[0] |= 2432 cpu_to_le32(NVME_PRLI_SP_CONF); 2433 /* Set service parameter BIT_8 for SLER support */ 2434 logio->io_parameter[0] |= 2435 cpu_to_le32(NVME_PRLI_SP_SLER); 2436 /* Set service parameter BIT_9 for PI control support */ 2437 logio->io_parameter[0] |= 2438 cpu_to_le32(NVME_PRLI_SP_PI_CTRL); 2439 } 2440 } 2441 2442 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2443 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 2444 logio->port_id[1] = sp->fcport->d_id.b.area; 2445 logio->port_id[2] = sp->fcport->d_id.b.domain; 2446 logio->vp_index = sp->vha->vp_idx; 2447 } 2448 2449 static void 2450 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio) 2451 { 2452 struct srb_iocb *lio = &sp->u.iocb_cmd; 2453 2454 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2455 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); 2456 2457 if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) { 2458 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI); 2459 } else { 2460 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); 2461 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI) 2462 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI); 2463 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI) 2464 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI); 2465 } 2466 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2467 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 2468 logio->port_id[1] = sp->fcport->d_id.b.area; 2469 logio->port_id[2] = sp->fcport->d_id.b.domain; 2470 logio->vp_index = sp->vha->vp_idx; 2471 } 2472 2473 static void 2474 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx) 2475 { 2476 struct qla_hw_data *ha = sp->vha->hw; 2477 struct srb_iocb *lio = &sp->u.iocb_cmd; 2478 uint16_t opts; 2479 2480 mbx->entry_type = MBX_IOCB_TYPE; 2481 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); 2482 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT); 2483 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? 
BIT_0 : 0; 2484 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0; 2485 if (HAS_EXTENDED_IDS(ha)) { 2486 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id); 2487 mbx->mb10 = cpu_to_le16(opts); 2488 } else { 2489 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts); 2490 } 2491 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); 2492 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | 2493 sp->fcport->d_id.b.al_pa); 2494 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); 2495 } 2496 2497 static void 2498 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio) 2499 { 2500 u16 control_flags = LCF_COMMAND_LOGO; 2501 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2502 2503 if (sp->fcport->explicit_logout) { 2504 control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT; 2505 } else { 2506 control_flags |= LCF_IMPL_LOGO; 2507 2508 if (!sp->fcport->keep_nport_handle) 2509 control_flags |= LCF_FREE_NPORT; 2510 } 2511 2512 logio->control_flags = cpu_to_le16(control_flags); 2513 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2514 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 2515 logio->port_id[1] = sp->fcport->d_id.b.area; 2516 logio->port_id[2] = sp->fcport->d_id.b.domain; 2517 logio->vp_index = sp->vha->vp_idx; 2518 } 2519 2520 static void 2521 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx) 2522 { 2523 struct qla_hw_data *ha = sp->vha->hw; 2524 2525 mbx->entry_type = MBX_IOCB_TYPE; 2526 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); 2527 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT); 2528 mbx->mb1 = HAS_EXTENDED_IDS(ha) ? 2529 cpu_to_le16(sp->fcport->loop_id) : 2530 cpu_to_le16(sp->fcport->loop_id << 8); 2531 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); 2532 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | 2533 sp->fcport->d_id.b.al_pa); 2534 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); 2535 /* Implicit: mbx->mbx10 = 0. 
*/ 2536 } 2537 2538 static void 2539 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio) 2540 { 2541 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2542 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC); 2543 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2544 logio->vp_index = sp->vha->vp_idx; 2545 } 2546 2547 static void 2548 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx) 2549 { 2550 struct qla_hw_data *ha = sp->vha->hw; 2551 2552 mbx->entry_type = MBX_IOCB_TYPE; 2553 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); 2554 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE); 2555 if (HAS_EXTENDED_IDS(ha)) { 2556 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id); 2557 mbx->mb10 = cpu_to_le16(BIT_0); 2558 } else { 2559 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0); 2560 } 2561 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma)); 2562 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma)); 2563 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma))); 2564 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma))); 2565 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); 2566 } 2567 2568 static void 2569 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) 2570 { 2571 uint32_t flags; 2572 uint64_t lun; 2573 struct fc_port *fcport = sp->fcport; 2574 scsi_qla_host_t *vha = fcport->vha; 2575 struct qla_hw_data *ha = vha->hw; 2576 struct srb_iocb *iocb = &sp->u.iocb_cmd; 2577 struct req_que *req = vha->req; 2578 2579 flags = iocb->u.tmf.flags; 2580 lun = iocb->u.tmf.lun; 2581 2582 tsk->entry_type = TSK_MGMT_IOCB_TYPE; 2583 tsk->entry_count = 1; 2584 tsk->handle = make_handle(req->id, tsk->handle); 2585 tsk->nport_handle = cpu_to_le16(fcport->loop_id); 2586 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 2587 tsk->control_flags = cpu_to_le32(flags); 2588 tsk->port_id[0] = fcport->d_id.b.al_pa; 2589 tsk->port_id[1] = fcport->d_id.b.area; 2590 tsk->port_id[2] = fcport->d_id.b.domain; 2591 tsk->vp_index = fcport->vha->vp_idx; 2592 2593 if (flags == TCF_LUN_RESET) { 2594 int_to_scsilun(lun, &tsk->lun); 2595 host_to_fcp_swap((uint8_t *)&tsk->lun, 2596 sizeof(tsk->lun)); 2597 } 2598 } 2599 2600 void qla2x00_init_timer(srb_t *sp, unsigned long tmo) 2601 { 2602 timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0); 2603 sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ; 2604 sp->free = qla2x00_sp_free; 2605 if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD) 2606 init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp); 2607 sp->start_timer = 1; 2608 } 2609 2610 static void qla2x00_els_dcmd_sp_free(srb_t *sp) 2611 { 2612 struct srb_iocb *elsio = &sp->u.iocb_cmd; 2613 2614 kfree(sp->fcport); 2615 2616 if (elsio->u.els_logo.els_logo_pyld) 2617 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE, 2618 elsio->u.els_logo.els_logo_pyld, 2619 elsio->u.els_logo.els_logo_pyld_dma); 2620 2621 del_timer(&elsio->timer); 2622 qla2x00_rel_sp(sp); 2623 } 2624 2625 static void 2626 qla2x00_els_dcmd_iocb_timeout(void *data) 2627 { 2628 srb_t *sp = data; 2629 fc_port_t *fcport = sp->fcport; 2630 struct scsi_qla_host *vha = sp->vha; 2631 struct srb_iocb *lio = &sp->u.iocb_cmd; 2632 unsigned long flags = 0; 2633 int res, h; 2634 2635 ql_dbg(ql_dbg_io, vha, 0x3069, 2636 "%s Timeout, hdl=%x, portid=%02x%02x%02x\n", 2637 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, 2638 fcport->d_id.b.al_pa); 2639 2640 /* Abort the exchange */ 2641 res = qla24xx_async_abort_cmd(sp, false); 2642 if (res) { 2643 ql_dbg(ql_dbg_io, vha, 0x3070, 2644 "mbx abort_command failed.\n"); 2645 
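		/*
		 * The firmware could not abort the exchange, so drop the
		 * stale entry from the outstanding command array under the
		 * qpair lock and wake the waiter directly.
		 */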
spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); 2646 for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { 2647 if (sp->qpair->req->outstanding_cmds[h] == sp) { 2648 sp->qpair->req->outstanding_cmds[h] = NULL; 2649 break; 2650 } 2651 } 2652 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); 2653 complete(&lio->u.els_logo.comp); 2654 } else { 2655 ql_dbg(ql_dbg_io, vha, 0x3071, 2656 "mbx abort_command success.\n"); 2657 } 2658 } 2659 2660 static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res) 2661 { 2662 fc_port_t *fcport = sp->fcport; 2663 struct srb_iocb *lio = &sp->u.iocb_cmd; 2664 struct scsi_qla_host *vha = sp->vha; 2665 2666 ql_dbg(ql_dbg_io, vha, 0x3072, 2667 "%s hdl=%x, portid=%02x%02x%02x done\n", 2668 sp->name, sp->handle, fcport->d_id.b.domain, 2669 fcport->d_id.b.area, fcport->d_id.b.al_pa); 2670 2671 complete(&lio->u.els_logo.comp); 2672 } 2673 2674 int 2675 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, 2676 port_id_t remote_did) 2677 { 2678 srb_t *sp; 2679 fc_port_t *fcport = NULL; 2680 struct srb_iocb *elsio = NULL; 2681 struct qla_hw_data *ha = vha->hw; 2682 struct els_logo_payload logo_pyld; 2683 int rval = QLA_SUCCESS; 2684 2685 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 2686 if (!fcport) { 2687 ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n"); 2688 return -ENOMEM; 2689 } 2690 2691 /* Alloc SRB structure */ 2692 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 2693 if (!sp) { 2694 kfree(fcport); 2695 ql_log(ql_log_info, vha, 0x70e6, 2696 "SRB allocation failed\n"); 2697 return -ENOMEM; 2698 } 2699 2700 elsio = &sp->u.iocb_cmd; 2701 fcport->loop_id = 0xFFFF; 2702 fcport->d_id.b.domain = remote_did.b.domain; 2703 fcport->d_id.b.area = remote_did.b.area; 2704 fcport->d_id.b.al_pa = remote_did.b.al_pa; 2705 2706 ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n", 2707 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); 2708 2709 sp->type = SRB_ELS_DCMD; 2710 sp->name = "ELS_DCMD"; 2711 sp->fcport = fcport; 2712 elsio->timeout = qla2x00_els_dcmd_iocb_timeout; 2713 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT); 2714 init_completion(&sp->u.iocb_cmd.u.els_logo.comp); 2715 sp->done = qla2x00_els_dcmd_sp_done; 2716 sp->free = qla2x00_els_dcmd_sp_free; 2717 2718 elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev, 2719 DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma, 2720 GFP_KERNEL); 2721 2722 if (!elsio->u.els_logo.els_logo_pyld) { 2723 sp->free(sp); 2724 return QLA_FUNCTION_FAILED; 2725 } 2726 2727 memset(&logo_pyld, 0, sizeof(struct els_logo_payload)); 2728 2729 elsio->u.els_logo.els_cmd = els_opcode; 2730 logo_pyld.opcode = els_opcode; 2731 logo_pyld.s_id[0] = vha->d_id.b.al_pa; 2732 logo_pyld.s_id[1] = vha->d_id.b.area; 2733 logo_pyld.s_id[2] = vha->d_id.b.domain; 2734 host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t)); 2735 memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE); 2736 2737 memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld, 2738 sizeof(struct els_logo_payload)); 2739 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3075, "LOGO buffer:"); 2740 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a, 2741 elsio->u.els_logo.els_logo_pyld, 2742 sizeof(*elsio->u.els_logo.els_logo_pyld)); 2743 2744 rval = qla2x00_start_sp(sp); 2745 if (rval != QLA_SUCCESS) { 2746 sp->free(sp); 2747 return QLA_FUNCTION_FAILED; 2748 } 2749 2750 ql_dbg(ql_dbg_io, vha, 0x3074, 2751 "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n", 2752 sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain, 2753 
fcport->d_id.b.area, fcport->d_id.b.al_pa); 2754 2755 wait_for_completion(&elsio->u.els_logo.comp); 2756 2757 sp->free(sp); 2758 return rval; 2759 } 2760 2761 static void 2762 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) 2763 { 2764 scsi_qla_host_t *vha = sp->vha; 2765 struct srb_iocb *elsio = &sp->u.iocb_cmd; 2766 2767 els_iocb->entry_type = ELS_IOCB_TYPE; 2768 els_iocb->entry_count = 1; 2769 els_iocb->sys_define = 0; 2770 els_iocb->entry_status = 0; 2771 els_iocb->handle = sp->handle; 2772 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2773 els_iocb->tx_dsd_count = cpu_to_le16(1); 2774 els_iocb->vp_index = vha->vp_idx; 2775 els_iocb->sof_type = EST_SOFI3; 2776 els_iocb->rx_dsd_count = 0; 2777 els_iocb->opcode = elsio->u.els_logo.els_cmd; 2778 2779 els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa; 2780 els_iocb->d_id[1] = sp->fcport->d_id.b.area; 2781 els_iocb->d_id[2] = sp->fcport->d_id.b.domain; 2782 /* For SID the byte order is different than DID */ 2783 els_iocb->s_id[1] = vha->d_id.b.al_pa; 2784 els_iocb->s_id[2] = vha->d_id.b.area; 2785 els_iocb->s_id[0] = vha->d_id.b.domain; 2786 2787 if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) { 2788 els_iocb->control_flags = 0; 2789 els_iocb->tx_byte_count = els_iocb->tx_len = 2790 cpu_to_le32(sizeof(struct els_plogi_payload)); 2791 put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma, 2792 &els_iocb->tx_address); 2793 els_iocb->rx_dsd_count = cpu_to_le16(1); 2794 els_iocb->rx_byte_count = els_iocb->rx_len = 2795 cpu_to_le32(sizeof(struct els_plogi_payload)); 2796 put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma, 2797 &els_iocb->rx_address); 2798 2799 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073, 2800 "PLOGI ELS IOCB:\n"); 2801 ql_dump_buffer(ql_log_info, vha, 0x0109, 2802 (uint8_t *)els_iocb, 2803 sizeof(*els_iocb)); 2804 } else { 2805 els_iocb->control_flags = cpu_to_le16(1 << 13); 2806 els_iocb->tx_byte_count = 2807 cpu_to_le32(sizeof(struct els_logo_payload)); 2808 put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma, 2809 &els_iocb->tx_address); 2810 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload)); 2811 2812 els_iocb->rx_byte_count = 0; 2813 els_iocb->rx_address = 0; 2814 els_iocb->rx_len = 0; 2815 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076, 2816 "LOGO ELS IOCB:"); 2817 ql_dump_buffer(ql_log_info, vha, 0x010b, 2818 els_iocb, 2819 sizeof(*els_iocb)); 2820 } 2821 2822 sp->vha->qla_stats.control_requests++; 2823 } 2824 2825 static void 2826 qla2x00_els_dcmd2_iocb_timeout(void *data) 2827 { 2828 srb_t *sp = data; 2829 fc_port_t *fcport = sp->fcport; 2830 struct scsi_qla_host *vha = sp->vha; 2831 unsigned long flags = 0; 2832 int res, h; 2833 2834 ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069, 2835 "%s hdl=%x ELS Timeout, %8phC portid=%06x\n", 2836 sp->name, sp->handle, fcport->port_name, fcport->d_id.b24); 2837 2838 /* Abort the exchange */ 2839 res = qla24xx_async_abort_cmd(sp, false); 2840 ql_dbg(ql_dbg_io, vha, 0x3070, 2841 "mbx abort_command %s\n", 2842 (res == QLA_SUCCESS) ? 
"successful" : "failed"); 2843 if (res) { 2844 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); 2845 for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { 2846 if (sp->qpair->req->outstanding_cmds[h] == sp) { 2847 sp->qpair->req->outstanding_cmds[h] = NULL; 2848 break; 2849 } 2850 } 2851 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); 2852 sp->done(sp, QLA_FUNCTION_TIMEOUT); 2853 } 2854 } 2855 2856 void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi) 2857 { 2858 if (els_plogi->els_plogi_pyld) 2859 dma_free_coherent(&vha->hw->pdev->dev, 2860 els_plogi->tx_size, 2861 els_plogi->els_plogi_pyld, 2862 els_plogi->els_plogi_pyld_dma); 2863 2864 if (els_plogi->els_resp_pyld) 2865 dma_free_coherent(&vha->hw->pdev->dev, 2866 els_plogi->rx_size, 2867 els_plogi->els_resp_pyld, 2868 els_plogi->els_resp_pyld_dma); 2869 } 2870 2871 static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) 2872 { 2873 fc_port_t *fcport = sp->fcport; 2874 struct srb_iocb *lio = &sp->u.iocb_cmd; 2875 struct scsi_qla_host *vha = sp->vha; 2876 struct event_arg ea; 2877 struct qla_work_evt *e; 2878 struct fc_port *conflict_fcport; 2879 port_id_t cid; /* conflict Nport id */ 2880 const __le32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status; 2881 u16 lid; 2882 2883 ql_dbg(ql_dbg_disc, vha, 0x3072, 2884 "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n", 2885 sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name); 2886 2887 fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE); 2888 del_timer(&sp->u.iocb_cmd.timer); 2889 2890 if (sp->flags & SRB_WAKEUP_ON_COMP) 2891 complete(&lio->u.els_plogi.comp); 2892 else { 2893 switch (le32_to_cpu(fw_status[0])) { 2894 case CS_DATA_UNDERRUN: 2895 case CS_COMPLETE: 2896 memset(&ea, 0, sizeof(ea)); 2897 ea.fcport = fcport; 2898 ea.rc = res; 2899 qla_handle_els_plogi_done(vha, &ea); 2900 break; 2901 2902 case CS_IOCB_ERROR: 2903 switch (le32_to_cpu(fw_status[1])) { 2904 case LSC_SCODE_PORTID_USED: 2905 lid = le32_to_cpu(fw_status[2]) & 0xffff; 2906 qlt_find_sess_invalidate_other(vha, 2907 wwn_to_u64(fcport->port_name), 2908 fcport->d_id, lid, &conflict_fcport); 2909 if (conflict_fcport) { 2910 /* 2911 * Another fcport shares the same 2912 * loop_id & nport id; conflict 2913 * fcport needs to finish cleanup 2914 * before this fcport can proceed 2915 * to login. 
2916 */ 2917 conflict_fcport->conflict = fcport; 2918 fcport->login_pause = 1; 2919 ql_dbg(ql_dbg_disc, vha, 0x20ed, 2920 "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n", 2921 __func__, __LINE__, 2922 fcport->port_name, 2923 fcport->d_id.b24, lid); 2924 } else { 2925 ql_dbg(ql_dbg_disc, vha, 0x20ed, 2926 "%s %d %8phC pid %06x inuse with lid %#x sched del\n", 2927 __func__, __LINE__, 2928 fcport->port_name, 2929 fcport->d_id.b24, lid); 2930 qla2x00_clear_loop_id(fcport); 2931 set_bit(lid, vha->hw->loop_id_map); 2932 fcport->loop_id = lid; 2933 fcport->keep_nport_handle = 0; 2934 qlt_schedule_sess_for_deletion(fcport); 2935 } 2936 break; 2937 2938 case LSC_SCODE_NPORT_USED: 2939 cid.b.domain = (le32_to_cpu(fw_status[2]) >> 16) 2940 & 0xff; 2941 cid.b.area = (le32_to_cpu(fw_status[2]) >> 8) 2942 & 0xff; 2943 cid.b.al_pa = le32_to_cpu(fw_status[2]) & 0xff; 2944 cid.b.rsvd_1 = 0; 2945 2946 ql_dbg(ql_dbg_disc, vha, 0x20ec, 2947 "%s %d %8phC lid %#x in use with pid %06x post gnl\n", 2948 __func__, __LINE__, fcport->port_name, 2949 fcport->loop_id, cid.b24); 2950 set_bit(fcport->loop_id, 2951 vha->hw->loop_id_map); 2952 fcport->loop_id = FC_NO_LOOP_ID; 2953 qla24xx_post_gnl_work(vha, fcport); 2954 break; 2955 2956 case LSC_SCODE_NOXCB: 2957 vha->hw->exch_starvation++; 2958 if (vha->hw->exch_starvation > 5) { 2959 ql_log(ql_log_warn, vha, 0xd046, 2960 "Exchange starvation. Resetting RISC\n"); 2961 vha->hw->exch_starvation = 0; 2962 set_bit(ISP_ABORT_NEEDED, 2963 &vha->dpc_flags); 2964 qla2xxx_wake_dpc(vha); 2965 } 2966 fallthrough; 2967 default: 2968 ql_dbg(ql_dbg_disc, vha, 0x20eb, 2969 "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n", 2970 __func__, sp->fcport->port_name, 2971 fw_status[0], fw_status[1], fw_status[2]); 2972 2973 fcport->flags &= ~FCF_ASYNC_SENT; 2974 qla2x00_set_fcport_disc_state(fcport, 2975 DSC_LOGIN_FAILED); 2976 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 2977 break; 2978 } 2979 break; 2980 2981 default: 2982 ql_dbg(ql_dbg_disc, vha, 0x20eb, 2983 "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n", 2984 __func__, sp->fcport->port_name, 2985 fw_status[0], fw_status[1], fw_status[2]); 2986 2987 sp->fcport->flags &= ~FCF_ASYNC_SENT; 2988 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_FAILED); 2989 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 2990 break; 2991 } 2992 2993 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP); 2994 if (!e) { 2995 struct srb_iocb *elsio = &sp->u.iocb_cmd; 2996 2997 qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi); 2998 sp->free(sp); 2999 return; 3000 } 3001 e->u.iosb.sp = sp; 3002 qla2x00_post_work(vha, e); 3003 } 3004 } 3005 3006 int 3007 qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, 3008 fc_port_t *fcport, bool wait) 3009 { 3010 srb_t *sp; 3011 struct srb_iocb *elsio = NULL; 3012 struct qla_hw_data *ha = vha->hw; 3013 int rval = QLA_SUCCESS; 3014 void *ptr, *resp_ptr; 3015 3016 /* Alloc SRB structure */ 3017 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 3018 if (!sp) { 3019 ql_log(ql_log_info, vha, 0x70e6, 3020 "SRB allocation failed\n"); 3021 fcport->flags &= ~FCF_ASYNC_ACTIVE; 3022 return -ENOMEM; 3023 } 3024 3025 fcport->flags |= FCF_ASYNC_SENT; 3026 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND); 3027 elsio = &sp->u.iocb_cmd; 3028 ql_dbg(ql_dbg_io, vha, 0x3073, 3029 "Enter: PLOGI portid=%06x\n", fcport->d_id.b24); 3030 3031 sp->type = SRB_ELS_DCMD; 3032 sp->name = "ELS_DCMD"; 3033 sp->fcport = fcport; 3034 3035 elsio->timeout = qla2x00_els_dcmd2_iocb_timeout; 3036 if (wait) 3037 sp->flags = SRB_WAKEUP_ON_COMP; 3038 3039 
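	/*
	 * qla2x00_init_timer() only sets up the SRB timer and flags
	 * sp->start_timer; the timer is actually armed later in
	 * qla2x00_start_sp() once the IOCB has been queued.
	 */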
qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2); 3040 3041 sp->done = qla2x00_els_dcmd2_sp_done; 3042 elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE; 3043 3044 ptr = elsio->u.els_plogi.els_plogi_pyld = 3045 dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.tx_size, 3046 &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL); 3047 3048 if (!elsio->u.els_plogi.els_plogi_pyld) { 3049 rval = QLA_FUNCTION_FAILED; 3050 goto out; 3051 } 3052 3053 resp_ptr = elsio->u.els_plogi.els_resp_pyld = 3054 dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.rx_size, 3055 &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL); 3056 3057 if (!elsio->u.els_plogi.els_resp_pyld) { 3058 rval = QLA_FUNCTION_FAILED; 3059 goto out; 3060 } 3061 3062 ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr); 3063 3064 memset(ptr, 0, sizeof(struct els_plogi_payload)); 3065 memset(resp_ptr, 0, sizeof(struct els_plogi_payload)); 3066 memcpy(elsio->u.els_plogi.els_plogi_pyld->data, 3067 &ha->plogi_els_payld.fl_csp, LOGIN_TEMPLATE_SIZE); 3068 3069 elsio->u.els_plogi.els_cmd = els_opcode; 3070 elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode; 3071 3072 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n"); 3073 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109, 3074 (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 3075 sizeof(*elsio->u.els_plogi.els_plogi_pyld)); 3076 3077 init_completion(&elsio->u.els_plogi.comp); 3078 rval = qla2x00_start_sp(sp); 3079 if (rval != QLA_SUCCESS) { 3080 rval = QLA_FUNCTION_FAILED; 3081 } else { 3082 ql_dbg(ql_dbg_disc, vha, 0x3074, 3083 "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n", 3084 sp->name, sp->handle, fcport->loop_id, 3085 fcport->d_id.b24, vha->d_id.b24); 3086 } 3087 3088 if (wait) { 3089 wait_for_completion(&elsio->u.els_plogi.comp); 3090 3091 if (elsio->u.els_plogi.comp_status != CS_COMPLETE) 3092 rval = QLA_FUNCTION_FAILED; 3093 } else { 3094 goto done; 3095 } 3096 3097 out: 3098 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); 3099 qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi); 3100 sp->free(sp); 3101 done: 3102 return rval; 3103 } 3104 3105 static void 3106 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) 3107 { 3108 struct bsg_job *bsg_job = sp->u.bsg_job; 3109 struct fc_bsg_request *bsg_request = bsg_job->request; 3110 3111 els_iocb->entry_type = ELS_IOCB_TYPE; 3112 els_iocb->entry_count = 1; 3113 els_iocb->sys_define = 0; 3114 els_iocb->entry_status = 0; 3115 els_iocb->handle = sp->handle; 3116 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3117 els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt); 3118 els_iocb->vp_index = sp->vha->vp_idx; 3119 els_iocb->sof_type = EST_SOFI3; 3120 els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt); 3121 3122 els_iocb->opcode = 3123 sp->type == SRB_ELS_CMD_RPT ? 
3124 bsg_request->rqst_data.r_els.els_code : 3125 bsg_request->rqst_data.h_els.command_code; 3126 els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa; 3127 els_iocb->d_id[1] = sp->fcport->d_id.b.area; 3128 els_iocb->d_id[2] = sp->fcport->d_id.b.domain; 3129 els_iocb->control_flags = 0; 3130 els_iocb->rx_byte_count = 3131 cpu_to_le32(bsg_job->reply_payload.payload_len); 3132 els_iocb->tx_byte_count = 3133 cpu_to_le32(bsg_job->request_payload.payload_len); 3134 3135 put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list), 3136 &els_iocb->tx_address); 3137 els_iocb->tx_len = cpu_to_le32(sg_dma_len 3138 (bsg_job->request_payload.sg_list)); 3139 3140 put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list), 3141 &els_iocb->rx_address); 3142 els_iocb->rx_len = cpu_to_le32(sg_dma_len 3143 (bsg_job->reply_payload.sg_list)); 3144 3145 sp->vha->qla_stats.control_requests++; 3146 } 3147 3148 static void 3149 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb) 3150 { 3151 uint16_t avail_dsds; 3152 struct dsd64 *cur_dsd; 3153 struct scatterlist *sg; 3154 int index; 3155 uint16_t tot_dsds; 3156 scsi_qla_host_t *vha = sp->vha; 3157 struct qla_hw_data *ha = vha->hw; 3158 struct bsg_job *bsg_job = sp->u.bsg_job; 3159 int entry_count = 1; 3160 3161 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t)); 3162 ct_iocb->entry_type = CT_IOCB_TYPE; 3163 ct_iocb->entry_status = 0; 3164 ct_iocb->handle1 = sp->handle; 3165 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id); 3166 ct_iocb->status = cpu_to_le16(0); 3167 ct_iocb->control_flags = cpu_to_le16(0); 3168 ct_iocb->timeout = 0; 3169 ct_iocb->cmd_dsd_count = 3170 cpu_to_le16(bsg_job->request_payload.sg_cnt); 3171 ct_iocb->total_dsd_count = 3172 cpu_to_le16(bsg_job->request_payload.sg_cnt + 1); 3173 ct_iocb->req_bytecount = 3174 cpu_to_le32(bsg_job->request_payload.payload_len); 3175 ct_iocb->rsp_bytecount = 3176 cpu_to_le32(bsg_job->reply_payload.payload_len); 3177 3178 put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list), 3179 &ct_iocb->req_dsd.address); 3180 ct_iocb->req_dsd.length = ct_iocb->req_bytecount; 3181 3182 put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list), 3183 &ct_iocb->rsp_dsd.address); 3184 ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount; 3185 3186 avail_dsds = 1; 3187 cur_dsd = &ct_iocb->rsp_dsd; 3188 index = 0; 3189 tot_dsds = bsg_job->reply_payload.sg_cnt; 3190 3191 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) { 3192 cont_a64_entry_t *cont_pkt; 3193 3194 /* Allocate additional continuation packets? */ 3195 if (avail_dsds == 0) { 3196 /* 3197 * Five DSDs are available in the Cont. 3198 * Type 1 IOCB. 
3199 */ 3200 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, 3201 vha->hw->req_q_map[0]); 3202 cur_dsd = cont_pkt->dsd; 3203 avail_dsds = 5; 3204 entry_count++; 3205 } 3206 3207 append_dsd64(&cur_dsd, sg); 3208 avail_dsds--; 3209 } 3210 ct_iocb->entry_count = entry_count; 3211 3212 sp->vha->qla_stats.control_requests++; 3213 } 3214 3215 static void 3216 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) 3217 { 3218 uint16_t avail_dsds; 3219 struct dsd64 *cur_dsd; 3220 struct scatterlist *sg; 3221 int index; 3222 uint16_t cmd_dsds, rsp_dsds; 3223 scsi_qla_host_t *vha = sp->vha; 3224 struct qla_hw_data *ha = vha->hw; 3225 struct bsg_job *bsg_job = sp->u.bsg_job; 3226 int entry_count = 1; 3227 cont_a64_entry_t *cont_pkt = NULL; 3228 3229 ct_iocb->entry_type = CT_IOCB_TYPE; 3230 ct_iocb->entry_status = 0; 3231 ct_iocb->sys_define = 0; 3232 ct_iocb->handle = sp->handle; 3233 3234 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3235 ct_iocb->vp_index = sp->vha->vp_idx; 3236 ct_iocb->comp_status = cpu_to_le16(0); 3237 3238 cmd_dsds = bsg_job->request_payload.sg_cnt; 3239 rsp_dsds = bsg_job->reply_payload.sg_cnt; 3240 3241 ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds); 3242 ct_iocb->timeout = 0; 3243 ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds); 3244 ct_iocb->cmd_byte_count = 3245 cpu_to_le32(bsg_job->request_payload.payload_len); 3246 3247 avail_dsds = 2; 3248 cur_dsd = ct_iocb->dsd; 3249 index = 0; 3250 3251 for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) { 3252 /* Allocate additional continuation packets? */ 3253 if (avail_dsds == 0) { 3254 /* 3255 * Five DSDs are available in the Cont. 3256 * Type 1 IOCB. 3257 */ 3258 cont_pkt = qla2x00_prep_cont_type1_iocb( 3259 vha, ha->req_q_map[0]); 3260 cur_dsd = cont_pkt->dsd; 3261 avail_dsds = 5; 3262 entry_count++; 3263 } 3264 3265 append_dsd64(&cur_dsd, sg); 3266 avail_dsds--; 3267 } 3268 3269 index = 0; 3270 3271 for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) { 3272 /* Allocate additional continuation packets? */ 3273 if (avail_dsds == 0) { 3274 /* 3275 * Five DSDs are available in the Cont. 3276 * Type 1 IOCB. 3277 */ 3278 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, 3279 ha->req_q_map[0]); 3280 cur_dsd = cont_pkt->dsd; 3281 avail_dsds = 5; 3282 entry_count++; 3283 } 3284 3285 append_dsd64(&cur_dsd, sg); 3286 avail_dsds--; 3287 } 3288 ct_iocb->entry_count = entry_count; 3289 } 3290 3291 /* 3292 * qla82xx_start_scsi() - Send a SCSI command to the ISP 3293 * @sp: command to send to the ISP 3294 * 3295 * Returns non-zero if a failure occurred, else zero. 3296 */ 3297 int 3298 qla82xx_start_scsi(srb_t *sp) 3299 { 3300 int nseg; 3301 unsigned long flags; 3302 struct scsi_cmnd *cmd; 3303 uint32_t *clr_ptr; 3304 uint32_t handle; 3305 uint16_t cnt; 3306 uint16_t req_cnt; 3307 uint16_t tot_dsds; 3308 struct device_reg_82xx __iomem *reg; 3309 uint32_t dbval; 3310 __be32 *fcp_dl; 3311 uint8_t additional_cdb_len; 3312 struct ct6_dsd *ctx; 3313 struct scsi_qla_host *vha = sp->vha; 3314 struct qla_hw_data *ha = vha->hw; 3315 struct req_que *req = NULL; 3316 struct rsp_que *rsp = NULL; 3317 3318 /* Setup device pointers. 
 */
	reg = &ha->iobase->isp82;
	cmd = GET_CMD_SP(sp);
	req = vha->req;
	rsp = ha->rsp_q_map[0];

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	dbval = 0x04 | (ha->portnum << 5);

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, ha->base_qpair,
		    0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x300c,
			    "qla2x00_marker failed for cmd=%p.\n", cmd);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	if (tot_dsds > ql2xshiftctondsd) {
		struct cmd_type_6 *cmd_pkt;
		uint16_t more_dsd_lists = 0;
		struct dsd_dma *dsd_ptr;
		uint16_t i;

		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
			ql_dbg(ql_dbg_io, vha, 0x300d,
			    "Num of DSD list %d is more than %d for cmd=%p.\n",
			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
			    cmd);
			goto queuing_error;
		}

		if (more_dsd_lists <= ha->gbl_dsd_avail)
			goto sufficient_dsds;
		else
			more_dsd_lists -= ha->gbl_dsd_avail;

		for (i = 0; i < more_dsd_lists; i++) {
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr) {
				ql_log(ql_log_fatal, vha, 0x300e,
				    "Failed to allocate memory for dsd_dma "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}

			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
			    GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
			if (!dsd_ptr->dsd_addr) {
				kfree(dsd_ptr);
				ql_log(ql_log_fatal, vha, 0x300f,
				    "Failed to allocate memory for dsd_addr "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}
			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
			ha->gbl_dsd_avail++;
		}

sufficient_dsds:
		req_cnt = 1;

		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)rd_reg_dword_relaxed(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
					(req->ring_index - cnt);
			if (req->cnt < (req_cnt + 2))
				goto queuing_error;
		}

		ctx = sp->u.scmd.ct6_ctx =
		    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
		if (!ctx) {
			ql_log(ql_log_fatal, vha, 0x3010,
			    "Failed to allocate ctx for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		memset(ctx, 0, sizeof(struct ct6_dsd));
		ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
		    GFP_ATOMIC, &ctx->fcp_cmnd_dma);
		if (!ctx->fcp_cmnd) {
			ql_log(ql_log_fatal, vha, 0x3011,
			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		/* Initialize the DSD list and dma handle */
		INIT_LIST_HEAD(&ctx->dsd_list);
		ctx->dsd_use_cnt = 0;

		if (cmd->cmd_len > 16) {
			additional_cdb_len = cmd->cmd_len - 16;
			if ((cmd->cmd_len % 4) != 0) {
				/* SCSI command bigger than 16 bytes must be
				 * multiple of 4
				 */
				ql_log(ql_log_warn, vha, 0x3012,
				    "scsi cmd len %d not multiple of 4 "
				    "for cmd=%p.\n", cmd->cmd_len, cmd);
				goto queuing_error_fcp_cmnd;
			}
			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
		} else {
			additional_cdb_len = 0;
			ctx->fcp_cmnd_len = 12 + 16 + 4;
		}

		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
		cmd_pkt->handle = make_handle(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number*/
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->vha->vp_idx;

		/* Build IOCB segments */
		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
			goto queuing_error_fcp_cmnd;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

		/* build FCP_CMND IU */
		int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;

		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 1;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 2;

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			ctx->fcp_cmnd->task_attribute |=
			    sp->fcport->fcp_prio << 3;

		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);

		fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
		    additional_cdb_len);
		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));

		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
		put_unaligned_le64(ctx->fcp_cmnd_dma,
		    &cmd_pkt->fcp_cmnd_dseg_address);

		sp->flags |= SRB_FCP_CMND_DMA_VALID;
		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify response queue number where
		 * completion should happen
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	} else {
		struct cmd_type_7 *cmd_pkt;

		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)rd_reg_dword_relaxed(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
					(req->ring_index - cnt);
		}
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;

		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
		cmd_pkt->handle = make_handle(req->id, handle);

		/* Zero out remaining portion of packet.
*/ 3528 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/ 3529 clr_ptr = (uint32_t *)cmd_pkt + 2; 3530 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 3531 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 3532 3533 /* Set NPORT-ID and LUN number*/ 3534 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3535 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 3536 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 3537 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 3538 cmd_pkt->vp_index = sp->vha->vp_idx; 3539 3540 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 3541 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, 3542 sizeof(cmd_pkt->lun)); 3543 3544 /* Populate the FCP_PRIO. */ 3545 if (ha->flags.fcp_prio_enabled) 3546 cmd_pkt->task |= sp->fcport->fcp_prio << 3; 3547 3548 /* Load SCSI command packet. */ 3549 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); 3550 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); 3551 3552 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 3553 3554 /* Build IOCB segments */ 3555 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req); 3556 3557 /* Set total data segment count. */ 3558 cmd_pkt->entry_count = (uint8_t)req_cnt; 3559 /* Specify response queue number where 3560 * completion should happen. 3561 */ 3562 cmd_pkt->entry_status = (uint8_t) rsp->id; 3563 3564 } 3565 /* Build command packet. */ 3566 req->current_outstanding_cmd = handle; 3567 req->outstanding_cmds[handle] = sp; 3568 sp->handle = handle; 3569 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 3570 req->cnt -= req_cnt; 3571 wmb(); 3572 3573 /* Adjust ring index. */ 3574 req->ring_index++; 3575 if (req->ring_index == req->length) { 3576 req->ring_index = 0; 3577 req->ring_ptr = req->ring; 3578 } else 3579 req->ring_ptr++; 3580 3581 sp->flags |= SRB_DMA_VALID; 3582 3583 /* Set chip new ring index. */ 3584 /* write, read and verify logic */ 3585 dbval = dbval | (req->id << 8) | (req->ring_index << 16); 3586 if (ql2xdbwr) 3587 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval); 3588 else { 3589 wrt_reg_dword(ha->nxdb_wr_ptr, dbval); 3590 wmb(); 3591 while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) { 3592 wrt_reg_dword(ha->nxdb_wr_ptr, dbval); 3593 wmb(); 3594 } 3595 } 3596 3597 /* Manage unprocessed RIO/ZIO commands in response queue. 
*/ 3598 if (vha->flags.process_response_queue && 3599 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 3600 qla24xx_process_response_queue(vha, rsp); 3601 3602 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3603 return QLA_SUCCESS; 3604 3605 queuing_error_fcp_cmnd: 3606 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma); 3607 queuing_error: 3608 if (tot_dsds) 3609 scsi_dma_unmap(cmd); 3610 3611 if (sp->u.scmd.crc_ctx) { 3612 mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool); 3613 sp->u.scmd.crc_ctx = NULL; 3614 } 3615 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3616 3617 return QLA_FUNCTION_FAILED; 3618 } 3619 3620 static void 3621 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb) 3622 { 3623 struct srb_iocb *aio = &sp->u.iocb_cmd; 3624 scsi_qla_host_t *vha = sp->vha; 3625 struct req_que *req = sp->qpair->req; 3626 srb_t *orig_sp = sp->cmd_sp; 3627 3628 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx)); 3629 abt_iocb->entry_type = ABORT_IOCB_TYPE; 3630 abt_iocb->entry_count = 1; 3631 abt_iocb->handle = make_handle(req->id, sp->handle); 3632 if (sp->fcport) { 3633 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3634 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; 3635 abt_iocb->port_id[1] = sp->fcport->d_id.b.area; 3636 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain; 3637 } 3638 abt_iocb->handle_to_abort = 3639 make_handle(le16_to_cpu(aio->u.abt.req_que_no), 3640 aio->u.abt.cmd_hndl); 3641 abt_iocb->vp_index = vha->vp_idx; 3642 abt_iocb->req_que_no = aio->u.abt.req_que_no; 3643 3644 /* need to pass original sp */ 3645 if (orig_sp) 3646 qla_nvme_abort_set_option(abt_iocb, orig_sp); 3647 3648 /* Send the command to the firmware */ 3649 wmb(); 3650 } 3651 3652 static void 3653 qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx) 3654 { 3655 int i, sz; 3656 3657 mbx->entry_type = MBX_IOCB_TYPE; 3658 mbx->handle = sp->handle; 3659 sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb)); 3660 3661 for (i = 0; i < sz; i++) 3662 mbx->mb[i] = sp->u.iocb_cmd.u.mbx.out_mb[i]; 3663 } 3664 3665 static void 3666 qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt) 3667 { 3668 sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt; 3669 qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg); 3670 ct_pkt->handle = sp->handle; 3671 } 3672 3673 static void qla2x00_send_notify_ack_iocb(srb_t *sp, 3674 struct nack_to_isp *nack) 3675 { 3676 struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy; 3677 3678 nack->entry_type = NOTIFY_ACK_TYPE; 3679 nack->entry_count = 1; 3680 nack->ox_id = ntfy->ox_id; 3681 3682 nack->u.isp24.handle = sp->handle; 3683 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; 3684 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { 3685 nack->u.isp24.flags = ntfy->u.isp24.flags & 3686 cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB); 3687 } 3688 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; 3689 nack->u.isp24.status = ntfy->u.isp24.status; 3690 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; 3691 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; 3692 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; 3693 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; 3694 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; 3695 nack->u.isp24.srr_flags = 0; 3696 nack->u.isp24.srr_reject_code = 0; 3697 nack->u.isp24.srr_reject_code_expl = 0; 3698 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; 3699 } 3700 3701 /* 3702 * Build NVME LS request 3703 */ 3704 static void 3705 qla_nvme_ls(srb_t 
*sp, struct pt_ls4_request *cmd_pkt) 3706 { 3707 struct srb_iocb *nvme; 3708 3709 nvme = &sp->u.iocb_cmd; 3710 cmd_pkt->entry_type = PT_LS4_REQUEST; 3711 cmd_pkt->entry_count = 1; 3712 cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT); 3713 3714 cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec); 3715 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3716 cmd_pkt->vp_index = sp->fcport->vha->vp_idx; 3717 3718 cmd_pkt->tx_dseg_count = cpu_to_le16(1); 3719 cmd_pkt->tx_byte_count = cpu_to_le32(nvme->u.nvme.cmd_len); 3720 cmd_pkt->dsd[0].length = cpu_to_le32(nvme->u.nvme.cmd_len); 3721 put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address); 3722 3723 cmd_pkt->rx_dseg_count = cpu_to_le16(1); 3724 cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len); 3725 cmd_pkt->dsd[1].length = cpu_to_le32(nvme->u.nvme.rsp_len); 3726 put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address); 3727 } 3728 3729 static void 3730 qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce) 3731 { 3732 int map, pos; 3733 3734 vce->entry_type = VP_CTRL_IOCB_TYPE; 3735 vce->handle = sp->handle; 3736 vce->entry_count = 1; 3737 vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd); 3738 vce->vp_count = cpu_to_le16(1); 3739 3740 /* 3741 * index map in firmware starts with 1; decrement index 3742 * this is ok as we never use index 0 3743 */ 3744 map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8; 3745 pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7; 3746 vce->vp_idx_map[map] |= 1 << pos; 3747 } 3748 3749 static void 3750 qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio) 3751 { 3752 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 3753 logio->control_flags = 3754 cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO); 3755 3756 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3757 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 3758 logio->port_id[1] = sp->fcport->d_id.b.area; 3759 logio->port_id[2] = sp->fcport->d_id.b.domain; 3760 logio->vp_index = sp->fcport->vha->vp_idx; 3761 } 3762 3763 int 3764 qla2x00_start_sp(srb_t *sp) 3765 { 3766 int rval = QLA_SUCCESS; 3767 scsi_qla_host_t *vha = sp->vha; 3768 struct qla_hw_data *ha = vha->hw; 3769 struct qla_qpair *qp = sp->qpair; 3770 void *pkt; 3771 unsigned long flags; 3772 3773 if (vha->hw->flags.eeh_busy) 3774 return -EIO; 3775 3776 spin_lock_irqsave(qp->qp_lock_ptr, flags); 3777 pkt = __qla2x00_alloc_iocbs(sp->qpair, sp); 3778 if (!pkt) { 3779 rval = EAGAIN; 3780 ql_log(ql_log_warn, vha, 0x700c, 3781 "qla2x00_alloc_iocbs failed.\n"); 3782 goto done; 3783 } 3784 3785 switch (sp->type) { 3786 case SRB_LOGIN_CMD: 3787 IS_FWI2_CAPABLE(ha) ? 3788 qla24xx_login_iocb(sp, pkt) : 3789 qla2x00_login_iocb(sp, pkt); 3790 break; 3791 case SRB_PRLI_CMD: 3792 qla24xx_prli_iocb(sp, pkt); 3793 break; 3794 case SRB_LOGOUT_CMD: 3795 IS_FWI2_CAPABLE(ha) ? 3796 qla24xx_logout_iocb(sp, pkt) : 3797 qla2x00_logout_iocb(sp, pkt); 3798 break; 3799 case SRB_ELS_CMD_RPT: 3800 case SRB_ELS_CMD_HST: 3801 qla24xx_els_iocb(sp, pkt); 3802 break; 3803 case SRB_CT_CMD: 3804 IS_FWI2_CAPABLE(ha) ? 3805 qla24xx_ct_iocb(sp, pkt) : 3806 qla2x00_ct_iocb(sp, pkt); 3807 break; 3808 case SRB_ADISC_CMD: 3809 IS_FWI2_CAPABLE(ha) ? 3810 qla24xx_adisc_iocb(sp, pkt) : 3811 qla2x00_adisc_iocb(sp, pkt); 3812 break; 3813 case SRB_TM_CMD: 3814 IS_QLAFX00(ha) ? 
3815 qlafx00_tm_iocb(sp, pkt) : 3816 qla24xx_tm_iocb(sp, pkt); 3817 break; 3818 case SRB_FXIOCB_DCMD: 3819 case SRB_FXIOCB_BCMD: 3820 qlafx00_fxdisc_iocb(sp, pkt); 3821 break; 3822 case SRB_NVME_LS: 3823 qla_nvme_ls(sp, pkt); 3824 break; 3825 case SRB_ABT_CMD: 3826 IS_QLAFX00(ha) ? 3827 qlafx00_abort_iocb(sp, pkt) : 3828 qla24xx_abort_iocb(sp, pkt); 3829 break; 3830 case SRB_ELS_DCMD: 3831 qla24xx_els_logo_iocb(sp, pkt); 3832 break; 3833 case SRB_CT_PTHRU_CMD: 3834 qla2x00_ctpthru_cmd_iocb(sp, pkt); 3835 break; 3836 case SRB_MB_IOCB: 3837 qla2x00_mb_iocb(sp, pkt); 3838 break; 3839 case SRB_NACK_PLOGI: 3840 case SRB_NACK_PRLI: 3841 case SRB_NACK_LOGO: 3842 qla2x00_send_notify_ack_iocb(sp, pkt); 3843 break; 3844 case SRB_CTRL_VP: 3845 qla25xx_ctrlvp_iocb(sp, pkt); 3846 break; 3847 case SRB_PRLO_CMD: 3848 qla24xx_prlo_iocb(sp, pkt); 3849 break; 3850 default: 3851 break; 3852 } 3853 3854 if (sp->start_timer) 3855 add_timer(&sp->u.iocb_cmd.timer); 3856 3857 wmb(); 3858 qla2x00_start_iocbs(vha, qp->req); 3859 done: 3860 spin_unlock_irqrestore(qp->qp_lock_ptr, flags); 3861 return rval; 3862 } 3863 3864 static void 3865 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha, 3866 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds) 3867 { 3868 uint16_t avail_dsds; 3869 struct dsd64 *cur_dsd; 3870 uint32_t req_data_len = 0; 3871 uint32_t rsp_data_len = 0; 3872 struct scatterlist *sg; 3873 int index; 3874 int entry_count = 1; 3875 struct bsg_job *bsg_job = sp->u.bsg_job; 3876 3877 /*Update entry type to indicate bidir command */ 3878 put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type); 3879 3880 /* Set the transfer direction, in this set both flags 3881 * Also set the BD_WRAP_BACK flag, firmware will take care 3882 * assigning DID=SID for outgoing pkts. 3883 */ 3884 cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt); 3885 cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt); 3886 cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA | 3887 BD_WRAP_BACK); 3888 3889 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len; 3890 cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len); 3891 cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len); 3892 cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2); 3893 3894 vha->bidi_stats.transfer_bytes += req_data_len; 3895 vha->bidi_stats.io_count++; 3896 3897 vha->qla_stats.output_bytes += req_data_len; 3898 vha->qla_stats.output_requests++; 3899 3900 /* Only one dsd is available for bidirectional IOCB, remaining dsds 3901 * are bundled in continuation iocb 3902 */ 3903 avail_dsds = 1; 3904 cur_dsd = &cmd_pkt->fcp_dsd; 3905 3906 index = 0; 3907 3908 for_each_sg(bsg_job->request_payload.sg_list, sg, 3909 bsg_job->request_payload.sg_cnt, index) { 3910 cont_a64_entry_t *cont_pkt; 3911 3912 /* Allocate additional continuation packets */ 3913 if (avail_dsds == 0) { 3914 /* Continuation type 1 IOCB can accomodate 3915 * 5 DSDS 3916 */ 3917 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); 3918 cur_dsd = cont_pkt->dsd; 3919 avail_dsds = 5; 3920 entry_count++; 3921 } 3922 append_dsd64(&cur_dsd, sg); 3923 avail_dsds--; 3924 } 3925 /* For read request DSD will always goes to continuation IOCB 3926 * and follow the write DSD. If there is room on the current IOCB 3927 * then it is added to that IOCB else new continuation IOCB is 3928 * allocated. 
3929 */ 3930 for_each_sg(bsg_job->reply_payload.sg_list, sg, 3931 bsg_job->reply_payload.sg_cnt, index) { 3932 cont_a64_entry_t *cont_pkt; 3933 3934 /* Allocate additional continuation packets */ 3935 if (avail_dsds == 0) { 3936 /* Continuation type 1 IOCB can accomodate 3937 * 5 DSDS 3938 */ 3939 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); 3940 cur_dsd = cont_pkt->dsd; 3941 avail_dsds = 5; 3942 entry_count++; 3943 } 3944 append_dsd64(&cur_dsd, sg); 3945 avail_dsds--; 3946 } 3947 /* This value should be same as number of IOCB required for this cmd */ 3948 cmd_pkt->entry_count = entry_count; 3949 } 3950 3951 int 3952 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds) 3953 { 3954 3955 struct qla_hw_data *ha = vha->hw; 3956 unsigned long flags; 3957 uint32_t handle; 3958 uint16_t req_cnt; 3959 uint16_t cnt; 3960 uint32_t *clr_ptr; 3961 struct cmd_bidir *cmd_pkt = NULL; 3962 struct rsp_que *rsp; 3963 struct req_que *req; 3964 int rval = EXT_STATUS_OK; 3965 3966 rval = QLA_SUCCESS; 3967 3968 rsp = ha->rsp_q_map[0]; 3969 req = vha->req; 3970 3971 /* Send marker if required */ 3972 if (vha->marker_needed != 0) { 3973 if (qla2x00_marker(vha, ha->base_qpair, 3974 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) 3975 return EXT_STATUS_MAILBOX; 3976 vha->marker_needed = 0; 3977 } 3978 3979 /* Acquire ring specific lock */ 3980 spin_lock_irqsave(&ha->hardware_lock, flags); 3981 3982 handle = qla2xxx_get_next_handle(req); 3983 if (handle == 0) { 3984 rval = EXT_STATUS_BUSY; 3985 goto queuing_error; 3986 } 3987 3988 /* Calculate number of IOCB required */ 3989 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); 3990 3991 /* Check for room on request queue. */ 3992 if (req->cnt < req_cnt + 2) { 3993 if (IS_SHADOW_REG_CAPABLE(ha)) { 3994 cnt = *req->out_ptr; 3995 } else { 3996 cnt = rd_reg_dword_relaxed(req->req_q_out); 3997 if (qla2x00_check_reg16_for_disconnect(vha, cnt)) 3998 goto queuing_error; 3999 } 4000 4001 if (req->ring_index < cnt) 4002 req->cnt = cnt - req->ring_index; 4003 else 4004 req->cnt = req->length - 4005 (req->ring_index - cnt); 4006 } 4007 if (req->cnt < req_cnt + 2) { 4008 rval = EXT_STATUS_BUSY; 4009 goto queuing_error; 4010 } 4011 4012 cmd_pkt = (struct cmd_bidir *)req->ring_ptr; 4013 cmd_pkt->handle = make_handle(req->id, handle); 4014 4015 /* Zero out remaining portion of packet. */ 4016 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/ 4017 clr_ptr = (uint32_t *)cmd_pkt + 2; 4018 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 4019 4020 /* Set NPORT-ID (of vha)*/ 4021 cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id); 4022 cmd_pkt->port_id[0] = vha->d_id.b.al_pa; 4023 cmd_pkt->port_id[1] = vha->d_id.b.area; 4024 cmd_pkt->port_id[2] = vha->d_id.b.domain; 4025 4026 qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds); 4027 cmd_pkt->entry_status = (uint8_t) rsp->id; 4028 /* Build command packet. */ 4029 req->current_outstanding_cmd = handle; 4030 req->outstanding_cmds[handle] = sp; 4031 sp->handle = handle; 4032 req->cnt -= req_cnt; 4033 4034 /* Send the command to the firmware */ 4035 wmb(); 4036 qla2x00_start_iocbs(vha, req); 4037 queuing_error: 4038 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4039 4040 return rval; 4041 } 4042
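
/*
 * Illustrative note (not part of the original source): DSD accounting in
 * qla25xx_build_bidir_iocb() above.  The bidirectional command IOCB itself
 * carries exactly one DSD (fcp_dsd); every further DSD spills into a
 * Continuation Type 1 IOCB, five DSDs per continuation, and each
 * continuation bumps entry_count.
 *
 * Hypothetical example: request_payload.sg_cnt = 4 and
 * reply_payload.sg_cnt = 3 give seven DSDs in total:
 *
 *   DSD 1       -> cmd_pkt->fcp_dsd
 *   DSDs 2..6   -> first continuation IOCB
 *   DSD 7       -> second continuation IOCB
 *
 * so entry_count ends up as 3, which should line up with the room
 * qla24xx_calc_iocbs() reserves in qla2x00_start_bidir() for
 * tot_dsds = 7.
 */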