/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flags data direction.
 * @sp: SRB command to process
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
        uint16_t cflags;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        struct scsi_qla_host *vha = sp->fcport->vha;

        cflags = 0;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }
        return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
        cont_entry_t *cont_pkt;
        struct req_que *req = vha->req;
        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_TYPE);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue to place the continuation IOCB on
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
        cont_a64_entry_t *cont_pkt;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
141 __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) : 142 __constant_cpu_to_le32(CONTINUE_A64_TYPE); 143 144 return (cont_pkt); 145 } 146 147 static inline int 148 qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts) 149 { 150 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 151 uint8_t guard = scsi_host_get_guard(cmd->device->host); 152 153 /* We always use DIFF Bundling for best performance */ 154 *fw_prot_opts = 0; 155 156 /* Translate SCSI opcode to a protection opcode */ 157 switch (scsi_get_prot_op(cmd)) { 158 case SCSI_PROT_READ_STRIP: 159 *fw_prot_opts |= PO_MODE_DIF_REMOVE; 160 break; 161 case SCSI_PROT_WRITE_INSERT: 162 *fw_prot_opts |= PO_MODE_DIF_INSERT; 163 break; 164 case SCSI_PROT_READ_INSERT: 165 *fw_prot_opts |= PO_MODE_DIF_INSERT; 166 break; 167 case SCSI_PROT_WRITE_STRIP: 168 *fw_prot_opts |= PO_MODE_DIF_REMOVE; 169 break; 170 case SCSI_PROT_READ_PASS: 171 case SCSI_PROT_WRITE_PASS: 172 if (guard & SHOST_DIX_GUARD_IP) 173 *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM; 174 else 175 *fw_prot_opts |= PO_MODE_DIF_PASS; 176 break; 177 default: /* Normal Request */ 178 *fw_prot_opts |= PO_MODE_DIF_PASS; 179 break; 180 } 181 182 return scsi_prot_sg_count(cmd); 183 } 184 185 /* 186 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit 187 * capable IOCB types. 188 * 189 * @sp: SRB command to process 190 * @cmd_pkt: Command type 2 IOCB 191 * @tot_dsds: Total number of segments to transfer 192 */ 193 void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt, 194 uint16_t tot_dsds) 195 { 196 uint16_t avail_dsds; 197 uint32_t *cur_dsd; 198 scsi_qla_host_t *vha; 199 struct scsi_cmnd *cmd; 200 struct scatterlist *sg; 201 int i; 202 203 cmd = GET_CMD_SP(sp); 204 205 /* Update entry type to indicate Command Type 2 IOCB */ 206 *((uint32_t *)(&cmd_pkt->entry_type)) = 207 __constant_cpu_to_le32(COMMAND_TYPE); 208 209 /* No data transfer */ 210 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { 211 cmd_pkt->byte_count = __constant_cpu_to_le32(0); 212 return; 213 } 214 215 vha = sp->fcport->vha; 216 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 217 218 /* Three DSDs are available in the Command Type 2 IOCB */ 219 avail_dsds = 3; 220 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address; 221 222 /* Load data segments */ 223 scsi_for_each_sg(cmd, sg, tot_dsds, i) { 224 cont_entry_t *cont_pkt; 225 226 /* Allocate additional continuation packets? */ 227 if (avail_dsds == 0) { 228 /* 229 * Seven DSDs are available in the Continuation 230 * Type 0 IOCB. 231 */ 232 cont_pkt = qla2x00_prep_cont_type0_iocb(vha); 233 cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address; 234 avail_dsds = 7; 235 } 236 237 *cur_dsd++ = cpu_to_le32(sg_dma_address(sg)); 238 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); 239 avail_dsds--; 240 } 241 } 242 243 /** 244 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit 245 * capable IOCB types. 
246 * 247 * @sp: SRB command to process 248 * @cmd_pkt: Command type 3 IOCB 249 * @tot_dsds: Total number of segments to transfer 250 */ 251 void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt, 252 uint16_t tot_dsds) 253 { 254 uint16_t avail_dsds; 255 uint32_t *cur_dsd; 256 scsi_qla_host_t *vha; 257 struct scsi_cmnd *cmd; 258 struct scatterlist *sg; 259 int i; 260 261 cmd = GET_CMD_SP(sp); 262 263 /* Update entry type to indicate Command Type 3 IOCB */ 264 *((uint32_t *)(&cmd_pkt->entry_type)) = 265 __constant_cpu_to_le32(COMMAND_A64_TYPE); 266 267 /* No data transfer */ 268 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { 269 cmd_pkt->byte_count = __constant_cpu_to_le32(0); 270 return; 271 } 272 273 vha = sp->fcport->vha; 274 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 275 276 /* Two DSDs are available in the Command Type 3 IOCB */ 277 avail_dsds = 2; 278 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address; 279 280 /* Load data segments */ 281 scsi_for_each_sg(cmd, sg, tot_dsds, i) { 282 dma_addr_t sle_dma; 283 cont_a64_entry_t *cont_pkt; 284 285 /* Allocate additional continuation packets? */ 286 if (avail_dsds == 0) { 287 /* 288 * Five DSDs are available in the Continuation 289 * Type 1 IOCB. 290 */ 291 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); 292 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; 293 avail_dsds = 5; 294 } 295 296 sle_dma = sg_dma_address(sg); 297 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 298 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 299 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); 300 avail_dsds--; 301 } 302 } 303 304 /** 305 * qla2x00_start_scsi() - Send a SCSI command to the ISP 306 * @sp: command to send to the ISP 307 * 308 * Returns non-zero if a failure occurred, else zero. 309 */ 310 int 311 qla2x00_start_scsi(srb_t *sp) 312 { 313 int ret, nseg; 314 unsigned long flags; 315 scsi_qla_host_t *vha; 316 struct scsi_cmnd *cmd; 317 uint32_t *clr_ptr; 318 uint32_t index; 319 uint32_t handle; 320 cmd_entry_t *cmd_pkt; 321 uint16_t cnt; 322 uint16_t req_cnt; 323 uint16_t tot_dsds; 324 struct device_reg_2xxx __iomem *reg; 325 struct qla_hw_data *ha; 326 struct req_que *req; 327 struct rsp_que *rsp; 328 329 /* Setup device pointers. */ 330 ret = 0; 331 vha = sp->fcport->vha; 332 ha = vha->hw; 333 reg = &ha->iobase->isp; 334 cmd = GET_CMD_SP(sp); 335 req = ha->req_q_map[0]; 336 rsp = ha->rsp_q_map[0]; 337 /* So we know we haven't pci_map'ed anything yet */ 338 tot_dsds = 0; 339 340 /* Send marker if required */ 341 if (vha->marker_needed != 0) { 342 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) != 343 QLA_SUCCESS) { 344 return (QLA_FUNCTION_FAILED); 345 } 346 vha->marker_needed = 0; 347 } 348 349 /* Acquire ring specific lock */ 350 spin_lock_irqsave(&ha->hardware_lock, flags); 351 352 /* Check for room in outstanding command list. */ 353 handle = req->current_outstanding_cmd; 354 for (index = 1; index < req->num_outstanding_cmds; index++) { 355 handle++; 356 if (handle == req->num_outstanding_cmds) 357 handle = 1; 358 if (!req->outstanding_cmds[handle]) 359 break; 360 } 361 if (index == req->num_outstanding_cmds) 362 goto queuing_error; 363 364 /* Map the sg table so we have an accurate count of sg entries needed */ 365 if (scsi_sg_count(cmd)) { 366 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), 367 scsi_sg_count(cmd), cmd->sc_data_direction); 368 if (unlikely(!nseg)) 369 goto queuing_error; 370 } else 371 nseg = 0; 372 373 tot_dsds = nseg; 374 375 /* Calculate the number of request entries needed. 
*/ 376 req_cnt = ha->isp_ops->calc_req_entries(tot_dsds); 377 if (req->cnt < (req_cnt + 2)) { 378 cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg)); 379 if (req->ring_index < cnt) 380 req->cnt = cnt - req->ring_index; 381 else 382 req->cnt = req->length - 383 (req->ring_index - cnt); 384 /* If still no head room then bail out */ 385 if (req->cnt < (req_cnt + 2)) 386 goto queuing_error; 387 } 388 389 /* Build command packet */ 390 req->current_outstanding_cmd = handle; 391 req->outstanding_cmds[handle] = sp; 392 sp->handle = handle; 393 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 394 req->cnt -= req_cnt; 395 396 cmd_pkt = (cmd_entry_t *)req->ring_ptr; 397 cmd_pkt->handle = handle; 398 /* Zero out remaining portion of packet. */ 399 clr_ptr = (uint32_t *)cmd_pkt + 2; 400 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 401 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 402 403 /* Set target ID and LUN number*/ 404 SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id); 405 cmd_pkt->lun = cpu_to_le16(cmd->device->lun); 406 cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG); 407 408 /* Load SCSI command packet. */ 409 memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len); 410 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 411 412 /* Build IOCB segments */ 413 ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds); 414 415 /* Set total data segment count. */ 416 cmd_pkt->entry_count = (uint8_t)req_cnt; 417 wmb(); 418 419 /* Adjust ring index. */ 420 req->ring_index++; 421 if (req->ring_index == req->length) { 422 req->ring_index = 0; 423 req->ring_ptr = req->ring; 424 } else 425 req->ring_ptr++; 426 427 sp->flags |= SRB_DMA_VALID; 428 429 /* Set chip new ring index. */ 430 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index); 431 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */ 432 433 /* Manage unprocessed RIO/ZIO commands in response queue. */ 434 if (vha->flags.process_response_queue && 435 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 436 qla2x00_process_response_queue(rsp); 437 438 spin_unlock_irqrestore(&ha->hardware_lock, flags); 439 return (QLA_SUCCESS); 440 441 queuing_error: 442 if (tot_dsds) 443 scsi_dma_unmap(cmd); 444 445 spin_unlock_irqrestore(&ha->hardware_lock, flags); 446 447 return (QLA_FUNCTION_FAILED); 448 } 449 450 /** 451 * qla2x00_start_iocbs() - Execute the IOCB command 452 */ 453 void 454 qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req) 455 { 456 struct qla_hw_data *ha = vha->hw; 457 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id); 458 459 if (IS_P3P_TYPE(ha)) { 460 qla82xx_start_iocbs(vha); 461 } else { 462 /* Adjust ring index. */ 463 req->ring_index++; 464 if (req->ring_index == req->length) { 465 req->ring_index = 0; 466 req->ring_ptr = req->ring; 467 } else 468 req->ring_ptr++; 469 470 /* Set chip new ring index. 
 */
                if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
                } else if (IS_QLAFX00(ha)) {
                        WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
                        QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
                } else if (IS_FWI2_CAPABLE(ha)) {
                        WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
                } else {
                        WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                            req->ring_index);
                        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
                }
        }
}

/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint64_t lun, uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24 = NULL;

        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        req = ha->req_q_map[0];
        mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
        if (mrk == NULL) {
                ql_log(ql_log_warn, base_vha, 0x3026,
                    "Failed to allocate Marker IOCB.\n");

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
                        mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16((uint16_t)lun);
                }
        }
        wmb();

        qla2x00_start_iocbs(vha, req);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
    uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
        ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

        return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
        if (ha_locked) {
                if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                    MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        } else {
                if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                    MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        }
        vha->marker_needed = 0;

        return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
        uint16_t tot_dsds)
{
        uint32_t *cur_dsd = NULL;
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha;
        struct scsi_cmnd *cmd;
        struct scatterlist *cur_seg;
        uint32_t *dsd_seg;
        void *next_dsd;
        uint8_t avail_dsds;
        uint8_t first_iocb = 1;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct ct6_dsd *ctx;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 6 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_6);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return 0;
        }

        vha = sp->fcport->vha;
        ha = vha->hw;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        cur_seg = scsi_sglist(cmd);
        ctx = GET_CMD_CTX_SP(sp);

        while (tot_dsds) {
                avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
                    QLA_DSDS_PER_IOCB : tot_dsds;
                tot_dsds -= avail_dsds;
                dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

                dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
                    struct dsd_dma, list);
                next_dsd = dsd_ptr->dsd_addr;
                list_del(&dsd_ptr->list);
                ha->gbl_dsd_avail--;
                list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
                ctx->dsd_use_cnt++;
                ha->gbl_dsd_inuse++;

                if (first_iocb) {
                        first_iocb = 0;
                        dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
                        *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
                } else {
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(dsd_list_len);
                }
                cur_dsd = (uint32_t *)next_dsd;
                while (avail_dsds) {
                        dma_addr_t sle_dma;

                        sle_dma = sg_dma_address(cur_seg);
                        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
                        cur_seg = sg_next(cur_seg);
                        avail_dsds--;
                }
        }

        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
        return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
682 */ 683 inline uint16_t 684 qla24xx_calc_dsd_lists(uint16_t dsds) 685 { 686 uint16_t dsd_lists = 0; 687 688 dsd_lists = (dsds/QLA_DSDS_PER_IOCB); 689 if (dsds % QLA_DSDS_PER_IOCB) 690 dsd_lists++; 691 return dsd_lists; 692 } 693 694 695 /** 696 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7 697 * IOCB types. 698 * 699 * @sp: SRB command to process 700 * @cmd_pkt: Command type 3 IOCB 701 * @tot_dsds: Total number of segments to transfer 702 */ 703 inline void 704 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt, 705 uint16_t tot_dsds) 706 { 707 uint16_t avail_dsds; 708 uint32_t *cur_dsd; 709 scsi_qla_host_t *vha; 710 struct scsi_cmnd *cmd; 711 struct scatterlist *sg; 712 int i; 713 struct req_que *req; 714 715 cmd = GET_CMD_SP(sp); 716 717 /* Update entry type to indicate Command Type 3 IOCB */ 718 *((uint32_t *)(&cmd_pkt->entry_type)) = 719 __constant_cpu_to_le32(COMMAND_TYPE_7); 720 721 /* No data transfer */ 722 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { 723 cmd_pkt->byte_count = __constant_cpu_to_le32(0); 724 return; 725 } 726 727 vha = sp->fcport->vha; 728 req = vha->req; 729 730 /* Set transfer direction */ 731 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 732 cmd_pkt->task_mgmt_flags = 733 __constant_cpu_to_le16(TMF_WRITE_DATA); 734 vha->qla_stats.output_bytes += scsi_bufflen(cmd); 735 vha->qla_stats.output_requests++; 736 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 737 cmd_pkt->task_mgmt_flags = 738 __constant_cpu_to_le16(TMF_READ_DATA); 739 vha->qla_stats.input_bytes += scsi_bufflen(cmd); 740 vha->qla_stats.input_requests++; 741 } 742 743 /* One DSD is available in the Command Type 3 IOCB */ 744 avail_dsds = 1; 745 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address; 746 747 /* Load data segments */ 748 749 scsi_for_each_sg(cmd, sg, tot_dsds, i) { 750 dma_addr_t sle_dma; 751 cont_a64_entry_t *cont_pkt; 752 753 /* Allocate additional continuation packets? */ 754 if (avail_dsds == 0) { 755 /* 756 * Five DSDs are available in the Continuation 757 * Type 1 IOCB. 758 */ 759 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); 760 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; 761 avail_dsds = 5; 762 } 763 764 sle_dma = sg_dma_address(sg); 765 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 766 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 767 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); 768 avail_dsds--; 769 } 770 } 771 772 struct fw_dif_context { 773 uint32_t ref_tag; 774 uint16_t app_tag; 775 uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/ 776 uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/ 777 }; 778 779 /* 780 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command 781 * 782 */ 783 static inline void 784 qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt, 785 unsigned int protcnt) 786 { 787 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 788 789 switch (scsi_get_prot_type(cmd)) { 790 case SCSI_PROT_DIF_TYPE0: 791 /* 792 * No check for ql2xenablehba_err_chk, as it would be an 793 * I/O error if hba tag generation is not done. 
794 */ 795 pkt->ref_tag = cpu_to_le32((uint32_t) 796 (0xffffffff & scsi_get_lba(cmd))); 797 798 if (!qla2x00_hba_err_chk_enabled(sp)) 799 break; 800 801 pkt->ref_tag_mask[0] = 0xff; 802 pkt->ref_tag_mask[1] = 0xff; 803 pkt->ref_tag_mask[2] = 0xff; 804 pkt->ref_tag_mask[3] = 0xff; 805 break; 806 807 /* 808 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to 809 * match LBA in CDB + N 810 */ 811 case SCSI_PROT_DIF_TYPE2: 812 pkt->app_tag = __constant_cpu_to_le16(0); 813 pkt->app_tag_mask[0] = 0x0; 814 pkt->app_tag_mask[1] = 0x0; 815 816 pkt->ref_tag = cpu_to_le32((uint32_t) 817 (0xffffffff & scsi_get_lba(cmd))); 818 819 if (!qla2x00_hba_err_chk_enabled(sp)) 820 break; 821 822 /* enable ALL bytes of the ref tag */ 823 pkt->ref_tag_mask[0] = 0xff; 824 pkt->ref_tag_mask[1] = 0xff; 825 pkt->ref_tag_mask[2] = 0xff; 826 pkt->ref_tag_mask[3] = 0xff; 827 break; 828 829 /* For Type 3 protection: 16 bit GUARD only */ 830 case SCSI_PROT_DIF_TYPE3: 831 pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] = 832 pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] = 833 0x00; 834 break; 835 836 /* 837 * For TYpe 1 protection: 16 bit GUARD tag, 32 bit REF tag, and 838 * 16 bit app tag. 839 */ 840 case SCSI_PROT_DIF_TYPE1: 841 pkt->ref_tag = cpu_to_le32((uint32_t) 842 (0xffffffff & scsi_get_lba(cmd))); 843 pkt->app_tag = __constant_cpu_to_le16(0); 844 pkt->app_tag_mask[0] = 0x0; 845 pkt->app_tag_mask[1] = 0x0; 846 847 if (!qla2x00_hba_err_chk_enabled(sp)) 848 break; 849 850 /* enable ALL bytes of the ref tag */ 851 pkt->ref_tag_mask[0] = 0xff; 852 pkt->ref_tag_mask[1] = 0xff; 853 pkt->ref_tag_mask[2] = 0xff; 854 pkt->ref_tag_mask[3] = 0xff; 855 break; 856 } 857 } 858 859 struct qla2_sgx { 860 dma_addr_t dma_addr; /* OUT */ 861 uint32_t dma_len; /* OUT */ 862 863 uint32_t tot_bytes; /* IN */ 864 struct scatterlist *cur_sg; /* IN */ 865 866 /* for book keeping, bzero on initial invocation */ 867 uint32_t bytes_consumed; 868 uint32_t num_bytes; 869 uint32_t tot_partial; 870 871 /* for debugging */ 872 uint32_t num_sg; 873 srb_t *sp; 874 }; 875 876 static int 877 qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx, 878 uint32_t *partial) 879 { 880 struct scatterlist *sg; 881 uint32_t cumulative_partial, sg_len; 882 dma_addr_t sg_dma_addr; 883 884 if (sgx->num_bytes == sgx->tot_bytes) 885 return 0; 886 887 sg = sgx->cur_sg; 888 cumulative_partial = sgx->tot_partial; 889 890 sg_dma_addr = sg_dma_address(sg); 891 sg_len = sg_dma_len(sg); 892 893 sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed; 894 895 if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) { 896 sgx->dma_len = (blk_sz - cumulative_partial); 897 sgx->tot_partial = 0; 898 sgx->num_bytes += blk_sz; 899 *partial = 0; 900 } else { 901 sgx->dma_len = sg_len - sgx->bytes_consumed; 902 sgx->tot_partial += sgx->dma_len; 903 *partial = 1; 904 } 905 906 sgx->bytes_consumed += sgx->dma_len; 907 908 if (sg_len == sgx->bytes_consumed) { 909 sg = sg_next(sg); 910 sgx->num_sg++; 911 sgx->cur_sg = sg; 912 sgx->bytes_consumed = 0; 913 } 914 915 return 1; 916 } 917 918 int 919 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, 920 uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc) 921 { 922 void *next_dsd; 923 uint8_t avail_dsds = 0; 924 uint32_t dsd_list_len; 925 struct dsd_dma *dsd_ptr; 926 struct scatterlist *sg_prot; 927 uint32_t *cur_dsd = dsd; 928 uint16_t used_dsds = tot_dsds; 929 930 uint32_t prot_int; /* protection interval */ 931 uint32_t partial; 932 struct qla2_sgx sgx; 933 dma_addr_t sle_dma; 934 uint32_t 
sle_dma_len, tot_prot_dma_len = 0; 935 struct scsi_cmnd *cmd; 936 struct scsi_qla_host *vha; 937 938 memset(&sgx, 0, sizeof(struct qla2_sgx)); 939 if (sp) { 940 vha = sp->fcport->vha; 941 cmd = GET_CMD_SP(sp); 942 prot_int = cmd->device->sector_size; 943 944 sgx.tot_bytes = scsi_bufflen(cmd); 945 sgx.cur_sg = scsi_sglist(cmd); 946 sgx.sp = sp; 947 948 sg_prot = scsi_prot_sglist(cmd); 949 } else if (tc) { 950 vha = tc->vha; 951 prot_int = tc->blk_sz; 952 sgx.tot_bytes = tc->bufflen; 953 sgx.cur_sg = tc->sg; 954 sg_prot = tc->prot_sg; 955 } else { 956 BUG(); 957 return 1; 958 } 959 960 while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) { 961 962 sle_dma = sgx.dma_addr; 963 sle_dma_len = sgx.dma_len; 964 alloc_and_fill: 965 /* Allocate additional continuation packets? */ 966 if (avail_dsds == 0) { 967 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 968 QLA_DSDS_PER_IOCB : used_dsds; 969 dsd_list_len = (avail_dsds + 1) * 12; 970 used_dsds -= avail_dsds; 971 972 /* allocate tracking DS */ 973 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); 974 if (!dsd_ptr) 975 return 1; 976 977 /* allocate new list */ 978 dsd_ptr->dsd_addr = next_dsd = 979 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, 980 &dsd_ptr->dsd_list_dma); 981 982 if (!next_dsd) { 983 /* 984 * Need to cleanup only this dsd_ptr, rest 985 * will be done by sp_free_dma() 986 */ 987 kfree(dsd_ptr); 988 return 1; 989 } 990 991 if (sp) { 992 list_add_tail(&dsd_ptr->list, 993 &((struct crc_context *) 994 sp->u.scmd.ctx)->dsd_list); 995 996 sp->flags |= SRB_CRC_CTX_DSD_VALID; 997 } else { 998 list_add_tail(&dsd_ptr->list, 999 &(tc->ctx->dsd_list)); 1000 tc->ctx_dsd_alloced = 1; 1001 } 1002 1003 1004 /* add new list to cmd iocb or last list */ 1005 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); 1006 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); 1007 *cur_dsd++ = dsd_list_len; 1008 cur_dsd = (uint32_t *)next_dsd; 1009 } 1010 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 1011 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 1012 *cur_dsd++ = cpu_to_le32(sle_dma_len); 1013 avail_dsds--; 1014 1015 if (partial == 0) { 1016 /* Got a full protection interval */ 1017 sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len; 1018 sle_dma_len = 8; 1019 1020 tot_prot_dma_len += sle_dma_len; 1021 if (tot_prot_dma_len == sg_dma_len(sg_prot)) { 1022 tot_prot_dma_len = 0; 1023 sg_prot = sg_next(sg_prot); 1024 } 1025 1026 partial = 1; /* So as to not re-enter this block */ 1027 goto alloc_and_fill; 1028 } 1029 } 1030 /* Null termination */ 1031 *cur_dsd++ = 0; 1032 *cur_dsd++ = 0; 1033 *cur_dsd++ = 0; 1034 return 0; 1035 } 1036 1037 int 1038 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, 1039 uint16_t tot_dsds, struct qla_tgt_cmd *tc) 1040 { 1041 void *next_dsd; 1042 uint8_t avail_dsds = 0; 1043 uint32_t dsd_list_len; 1044 struct dsd_dma *dsd_ptr; 1045 struct scatterlist *sg, *sgl; 1046 uint32_t *cur_dsd = dsd; 1047 int i; 1048 uint16_t used_dsds = tot_dsds; 1049 struct scsi_cmnd *cmd; 1050 struct scsi_qla_host *vha; 1051 1052 if (sp) { 1053 cmd = GET_CMD_SP(sp); 1054 sgl = scsi_sglist(cmd); 1055 vha = sp->fcport->vha; 1056 } else if (tc) { 1057 sgl = tc->sg; 1058 vha = tc->vha; 1059 } else { 1060 BUG(); 1061 return 1; 1062 } 1063 1064 1065 for_each_sg(sgl, sg, tot_dsds, i) { 1066 dma_addr_t sle_dma; 1067 1068 /* Allocate additional continuation packets? */ 1069 if (avail_dsds == 0) { 1070 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 
1071 QLA_DSDS_PER_IOCB : used_dsds; 1072 dsd_list_len = (avail_dsds + 1) * 12; 1073 used_dsds -= avail_dsds; 1074 1075 /* allocate tracking DS */ 1076 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); 1077 if (!dsd_ptr) 1078 return 1; 1079 1080 /* allocate new list */ 1081 dsd_ptr->dsd_addr = next_dsd = 1082 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, 1083 &dsd_ptr->dsd_list_dma); 1084 1085 if (!next_dsd) { 1086 /* 1087 * Need to cleanup only this dsd_ptr, rest 1088 * will be done by sp_free_dma() 1089 */ 1090 kfree(dsd_ptr); 1091 return 1; 1092 } 1093 1094 if (sp) { 1095 list_add_tail(&dsd_ptr->list, 1096 &((struct crc_context *) 1097 sp->u.scmd.ctx)->dsd_list); 1098 1099 sp->flags |= SRB_CRC_CTX_DSD_VALID; 1100 } else { 1101 list_add_tail(&dsd_ptr->list, 1102 &(tc->ctx->dsd_list)); 1103 tc->ctx_dsd_alloced = 1; 1104 } 1105 1106 /* add new list to cmd iocb or last list */ 1107 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); 1108 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); 1109 *cur_dsd++ = dsd_list_len; 1110 cur_dsd = (uint32_t *)next_dsd; 1111 } 1112 sle_dma = sg_dma_address(sg); 1113 1114 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 1115 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 1116 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); 1117 avail_dsds--; 1118 1119 } 1120 /* Null termination */ 1121 *cur_dsd++ = 0; 1122 *cur_dsd++ = 0; 1123 *cur_dsd++ = 0; 1124 return 0; 1125 } 1126 1127 int 1128 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, 1129 uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc) 1130 { 1131 void *next_dsd; 1132 uint8_t avail_dsds = 0; 1133 uint32_t dsd_list_len; 1134 struct dsd_dma *dsd_ptr; 1135 struct scatterlist *sg, *sgl; 1136 int i; 1137 struct scsi_cmnd *cmd; 1138 uint32_t *cur_dsd = dsd; 1139 uint16_t used_dsds = tot_dsds; 1140 struct scsi_qla_host *vha; 1141 1142 if (sp) { 1143 cmd = GET_CMD_SP(sp); 1144 sgl = scsi_prot_sglist(cmd); 1145 vha = sp->fcport->vha; 1146 } else if (tc) { 1147 vha = tc->vha; 1148 sgl = tc->prot_sg; 1149 } else { 1150 BUG(); 1151 return 1; 1152 } 1153 1154 ql_dbg(ql_dbg_tgt, vha, 0xe021, 1155 "%s: enter\n", __func__); 1156 1157 for_each_sg(sgl, sg, tot_dsds, i) { 1158 dma_addr_t sle_dma; 1159 1160 /* Allocate additional continuation packets? */ 1161 if (avail_dsds == 0) { 1162 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 
1163 QLA_DSDS_PER_IOCB : used_dsds; 1164 dsd_list_len = (avail_dsds + 1) * 12; 1165 used_dsds -= avail_dsds; 1166 1167 /* allocate tracking DS */ 1168 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); 1169 if (!dsd_ptr) 1170 return 1; 1171 1172 /* allocate new list */ 1173 dsd_ptr->dsd_addr = next_dsd = 1174 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, 1175 &dsd_ptr->dsd_list_dma); 1176 1177 if (!next_dsd) { 1178 /* 1179 * Need to cleanup only this dsd_ptr, rest 1180 * will be done by sp_free_dma() 1181 */ 1182 kfree(dsd_ptr); 1183 return 1; 1184 } 1185 1186 if (sp) { 1187 list_add_tail(&dsd_ptr->list, 1188 &((struct crc_context *) 1189 sp->u.scmd.ctx)->dsd_list); 1190 1191 sp->flags |= SRB_CRC_CTX_DSD_VALID; 1192 } else { 1193 list_add_tail(&dsd_ptr->list, 1194 &(tc->ctx->dsd_list)); 1195 tc->ctx_dsd_alloced = 1; 1196 } 1197 1198 /* add new list to cmd iocb or last list */ 1199 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); 1200 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); 1201 *cur_dsd++ = dsd_list_len; 1202 cur_dsd = (uint32_t *)next_dsd; 1203 } 1204 sle_dma = sg_dma_address(sg); 1205 1206 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 1207 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 1208 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); 1209 1210 avail_dsds--; 1211 } 1212 /* Null termination */ 1213 *cur_dsd++ = 0; 1214 *cur_dsd++ = 0; 1215 *cur_dsd++ = 0; 1216 return 0; 1217 } 1218 1219 /** 1220 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command 1221 * Type 6 IOCB types. 1222 * 1223 * @sp: SRB command to process 1224 * @cmd_pkt: Command type 3 IOCB 1225 * @tot_dsds: Total number of segments to transfer 1226 */ 1227 static inline int 1228 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, 1229 uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts) 1230 { 1231 uint32_t *cur_dsd, *fcp_dl; 1232 scsi_qla_host_t *vha; 1233 struct scsi_cmnd *cmd; 1234 int sgc; 1235 uint32_t total_bytes = 0; 1236 uint32_t data_bytes; 1237 uint32_t dif_bytes; 1238 uint8_t bundling = 1; 1239 uint16_t blk_size; 1240 uint8_t *clr_ptr; 1241 struct crc_context *crc_ctx_pkt = NULL; 1242 struct qla_hw_data *ha; 1243 uint8_t additional_fcpcdb_len; 1244 uint16_t fcp_cmnd_len; 1245 struct fcp_cmnd *fcp_cmnd; 1246 dma_addr_t crc_ctx_dma; 1247 1248 cmd = GET_CMD_SP(sp); 1249 1250 sgc = 0; 1251 /* Update entry type to indicate Command Type CRC_2 IOCB */ 1252 *((uint32_t *)(&cmd_pkt->entry_type)) = 1253 __constant_cpu_to_le32(COMMAND_TYPE_CRC_2); 1254 1255 vha = sp->fcport->vha; 1256 ha = vha->hw; 1257 1258 /* No data transfer */ 1259 data_bytes = scsi_bufflen(cmd); 1260 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) { 1261 cmd_pkt->byte_count = __constant_cpu_to_le32(0); 1262 return QLA_SUCCESS; 1263 } 1264 1265 cmd_pkt->vp_index = sp->fcport->vha->vp_idx; 1266 1267 /* Set transfer direction */ 1268 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 1269 cmd_pkt->control_flags = 1270 __constant_cpu_to_le16(CF_WRITE_DATA); 1271 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 1272 cmd_pkt->control_flags = 1273 __constant_cpu_to_le16(CF_READ_DATA); 1274 } 1275 1276 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || 1277 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) || 1278 (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) || 1279 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT)) 1280 bundling = 0; 1281 1282 /* Allocate CRC context from global pool */ 1283 crc_ctx_pkt = sp->u.scmd.ctx = 1284 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma); 
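        /*
         * Note: the CRC context allocated above is DMA-able storage for the
         * per-command protection state built below: the embedded FCP_CMND,
         * the DIF ref/app tags and masks, the block size and protection
         * options, and the DSD list(s) for the data (and, when bundling,
         * the protection) segments.  The Type CRC_2 IOCB references it only
         * by DMA address and length (crc_context_address/crc_context_len).
         */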
1285 1286 if (!crc_ctx_pkt) 1287 goto crc_queuing_error; 1288 1289 /* Zero out CTX area. */ 1290 clr_ptr = (uint8_t *)crc_ctx_pkt; 1291 memset(clr_ptr, 0, sizeof(*crc_ctx_pkt)); 1292 1293 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma; 1294 1295 sp->flags |= SRB_CRC_CTX_DMA_VALID; 1296 1297 /* Set handle */ 1298 crc_ctx_pkt->handle = cmd_pkt->handle; 1299 1300 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list); 1301 1302 qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *) 1303 &crc_ctx_pkt->ref_tag, tot_prot_dsds); 1304 1305 cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma)); 1306 cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma)); 1307 cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW; 1308 1309 /* Determine SCSI command length -- align to 4 byte boundary */ 1310 if (cmd->cmd_len > 16) { 1311 additional_fcpcdb_len = cmd->cmd_len - 16; 1312 if ((cmd->cmd_len % 4) != 0) { 1313 /* SCSI cmd > 16 bytes must be multiple of 4 */ 1314 goto crc_queuing_error; 1315 } 1316 fcp_cmnd_len = 12 + cmd->cmd_len + 4; 1317 } else { 1318 additional_fcpcdb_len = 0; 1319 fcp_cmnd_len = 12 + 16 + 4; 1320 } 1321 1322 fcp_cmnd = &crc_ctx_pkt->fcp_cmnd; 1323 1324 fcp_cmnd->additional_cdb_len = additional_fcpcdb_len; 1325 if (cmd->sc_data_direction == DMA_TO_DEVICE) 1326 fcp_cmnd->additional_cdb_len |= 1; 1327 else if (cmd->sc_data_direction == DMA_FROM_DEVICE) 1328 fcp_cmnd->additional_cdb_len |= 2; 1329 1330 int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun); 1331 memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); 1332 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len); 1333 cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32( 1334 LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF)); 1335 cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32( 1336 MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF)); 1337 fcp_cmnd->task_management = 0; 1338 fcp_cmnd->task_attribute = TSK_SIMPLE; 1339 1340 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ 1341 1342 /* Compute dif len and adjust data len to incude protection */ 1343 dif_bytes = 0; 1344 blk_size = cmd->device->sector_size; 1345 dif_bytes = (data_bytes / blk_size) * 8; 1346 1347 switch (scsi_get_prot_op(GET_CMD_SP(sp))) { 1348 case SCSI_PROT_READ_INSERT: 1349 case SCSI_PROT_WRITE_STRIP: 1350 total_bytes = data_bytes; 1351 data_bytes += dif_bytes; 1352 break; 1353 1354 case SCSI_PROT_READ_STRIP: 1355 case SCSI_PROT_WRITE_INSERT: 1356 case SCSI_PROT_READ_PASS: 1357 case SCSI_PROT_WRITE_PASS: 1358 total_bytes = data_bytes + dif_bytes; 1359 break; 1360 default: 1361 BUG(); 1362 } 1363 1364 if (!qla2x00_hba_err_chk_enabled(sp)) 1365 fw_prot_opts |= 0x10; /* Disable Guard tag checking */ 1366 /* HBA error checking enabled */ 1367 else if (IS_PI_UNINIT_CAPABLE(ha)) { 1368 if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1) 1369 || (scsi_get_prot_type(GET_CMD_SP(sp)) == 1370 SCSI_PROT_DIF_TYPE2)) 1371 fw_prot_opts |= BIT_10; 1372 else if (scsi_get_prot_type(GET_CMD_SP(sp)) == 1373 SCSI_PROT_DIF_TYPE3) 1374 fw_prot_opts |= BIT_11; 1375 } 1376 1377 if (!bundling) { 1378 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address; 1379 } else { 1380 /* 1381 * Configure Bundling if we need to fetch interlaving 1382 * protection PCI accesses 1383 */ 1384 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING; 1385 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes); 1386 crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds - 1387 tot_prot_dsds); 1388 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address; 1389 } 1390 1391 /* Finish the common fields of CRC pkt 
*/ 1392 crc_ctx_pkt->blk_size = cpu_to_le16(blk_size); 1393 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts); 1394 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes); 1395 crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0); 1396 /* Fibre channel byte count */ 1397 cmd_pkt->byte_count = cpu_to_le32(total_bytes); 1398 fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 + 1399 additional_fcpcdb_len); 1400 *fcp_dl = htonl(total_bytes); 1401 1402 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) { 1403 cmd_pkt->byte_count = __constant_cpu_to_le32(0); 1404 return QLA_SUCCESS; 1405 } 1406 /* Walks data segments */ 1407 1408 cmd_pkt->control_flags |= 1409 __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE); 1410 1411 if (!bundling && tot_prot_dsds) { 1412 if (qla24xx_walk_and_build_sglist_no_difb(ha, sp, 1413 cur_dsd, tot_dsds, NULL)) 1414 goto crc_queuing_error; 1415 } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, 1416 (tot_dsds - tot_prot_dsds), NULL)) 1417 goto crc_queuing_error; 1418 1419 if (bundling && tot_prot_dsds) { 1420 /* Walks dif segments */ 1421 cmd_pkt->control_flags |= 1422 __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE); 1423 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; 1424 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd, 1425 tot_prot_dsds, NULL)) 1426 goto crc_queuing_error; 1427 } 1428 return QLA_SUCCESS; 1429 1430 crc_queuing_error: 1431 /* Cleanup will be performed by the caller */ 1432 1433 return QLA_FUNCTION_FAILED; 1434 } 1435 1436 /** 1437 * qla24xx_start_scsi() - Send a SCSI command to the ISP 1438 * @sp: command to send to the ISP 1439 * 1440 * Returns non-zero if a failure occurred, else zero. 1441 */ 1442 int 1443 qla24xx_start_scsi(srb_t *sp) 1444 { 1445 int ret, nseg; 1446 unsigned long flags; 1447 uint32_t *clr_ptr; 1448 uint32_t index; 1449 uint32_t handle; 1450 struct cmd_type_7 *cmd_pkt; 1451 uint16_t cnt; 1452 uint16_t req_cnt; 1453 uint16_t tot_dsds; 1454 struct req_que *req = NULL; 1455 struct rsp_que *rsp = NULL; 1456 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1457 struct scsi_qla_host *vha = sp->fcport->vha; 1458 struct qla_hw_data *ha = vha->hw; 1459 1460 /* Setup device pointers. */ 1461 ret = 0; 1462 1463 qla25xx_set_que(sp, &rsp); 1464 req = vha->req; 1465 1466 /* So we know we haven't pci_map'ed anything yet */ 1467 tot_dsds = 0; 1468 1469 /* Send marker if required */ 1470 if (vha->marker_needed != 0) { 1471 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) != 1472 QLA_SUCCESS) 1473 return QLA_FUNCTION_FAILED; 1474 vha->marker_needed = 0; 1475 } 1476 1477 /* Acquire ring specific lock */ 1478 spin_lock_irqsave(&ha->hardware_lock, flags); 1479 1480 /* Check for room in outstanding command list. */ 1481 handle = req->current_outstanding_cmd; 1482 for (index = 1; index < req->num_outstanding_cmds; index++) { 1483 handle++; 1484 if (handle == req->num_outstanding_cmds) 1485 handle = 1; 1486 if (!req->outstanding_cmds[handle]) 1487 break; 1488 } 1489 if (index == req->num_outstanding_cmds) 1490 goto queuing_error; 1491 1492 /* Map the sg table so we have an accurate count of sg entries needed */ 1493 if (scsi_sg_count(cmd)) { 1494 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), 1495 scsi_sg_count(cmd), cmd->sc_data_direction); 1496 if (unlikely(!nseg)) 1497 goto queuing_error; 1498 } else 1499 nseg = 0; 1500 1501 tot_dsds = nseg; 1502 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); 1503 if (req->cnt < (req_cnt + 2)) { 1504 cnt = IS_SHADOW_REG_CAPABLE(ha) ? 
*req->out_ptr : 1505 RD_REG_DWORD_RELAXED(req->req_q_out); 1506 if (req->ring_index < cnt) 1507 req->cnt = cnt - req->ring_index; 1508 else 1509 req->cnt = req->length - 1510 (req->ring_index - cnt); 1511 if (req->cnt < (req_cnt + 2)) 1512 goto queuing_error; 1513 } 1514 1515 /* Build command packet. */ 1516 req->current_outstanding_cmd = handle; 1517 req->outstanding_cmds[handle] = sp; 1518 sp->handle = handle; 1519 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 1520 req->cnt -= req_cnt; 1521 1522 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; 1523 cmd_pkt->handle = MAKE_HANDLE(req->id, handle); 1524 1525 /* Zero out remaining portion of packet. */ 1526 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ 1527 clr_ptr = (uint32_t *)cmd_pkt + 2; 1528 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 1529 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 1530 1531 /* Set NPORT-ID and LUN number*/ 1532 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 1533 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 1534 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 1535 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 1536 cmd_pkt->vp_index = sp->fcport->vha->vp_idx; 1537 1538 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 1539 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 1540 1541 cmd_pkt->task = TSK_SIMPLE; 1542 1543 /* Load SCSI command packet. */ 1544 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); 1545 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); 1546 1547 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 1548 1549 /* Build IOCB segments */ 1550 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds); 1551 1552 /* Set total data segment count. */ 1553 cmd_pkt->entry_count = (uint8_t)req_cnt; 1554 /* Specify response queue number where completion should happen */ 1555 cmd_pkt->entry_status = (uint8_t) rsp->id; 1556 wmb(); 1557 /* Adjust ring index. */ 1558 req->ring_index++; 1559 if (req->ring_index == req->length) { 1560 req->ring_index = 0; 1561 req->ring_ptr = req->ring; 1562 } else 1563 req->ring_ptr++; 1564 1565 sp->flags |= SRB_DMA_VALID; 1566 1567 /* Set chip new ring index. */ 1568 WRT_REG_DWORD(req->req_q_in, req->ring_index); 1569 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr); 1570 1571 /* Manage unprocessed RIO/ZIO commands in response queue. */ 1572 if (vha->flags.process_response_queue && 1573 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 1574 qla24xx_process_response_queue(vha, rsp); 1575 1576 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1577 return QLA_SUCCESS; 1578 1579 queuing_error: 1580 if (tot_dsds) 1581 scsi_dma_unmap(cmd); 1582 1583 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1584 1585 return QLA_FUNCTION_FAILED; 1586 } 1587 1588 /** 1589 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP 1590 * @sp: command to send to the ISP 1591 * 1592 * Returns non-zero if a failure occurred, else zero. 
1593 */ 1594 int 1595 qla24xx_dif_start_scsi(srb_t *sp) 1596 { 1597 int nseg; 1598 unsigned long flags; 1599 uint32_t *clr_ptr; 1600 uint32_t index; 1601 uint32_t handle; 1602 uint16_t cnt; 1603 uint16_t req_cnt = 0; 1604 uint16_t tot_dsds; 1605 uint16_t tot_prot_dsds; 1606 uint16_t fw_prot_opts = 0; 1607 struct req_que *req = NULL; 1608 struct rsp_que *rsp = NULL; 1609 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1610 struct scsi_qla_host *vha = sp->fcport->vha; 1611 struct qla_hw_data *ha = vha->hw; 1612 struct cmd_type_crc_2 *cmd_pkt; 1613 uint32_t status = 0; 1614 1615 #define QDSS_GOT_Q_SPACE BIT_0 1616 1617 /* Only process protection or >16 cdb in this routine */ 1618 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) { 1619 if (cmd->cmd_len <= 16) 1620 return qla24xx_start_scsi(sp); 1621 } 1622 1623 /* Setup device pointers. */ 1624 1625 qla25xx_set_que(sp, &rsp); 1626 req = vha->req; 1627 1628 /* So we know we haven't pci_map'ed anything yet */ 1629 tot_dsds = 0; 1630 1631 /* Send marker if required */ 1632 if (vha->marker_needed != 0) { 1633 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) != 1634 QLA_SUCCESS) 1635 return QLA_FUNCTION_FAILED; 1636 vha->marker_needed = 0; 1637 } 1638 1639 /* Acquire ring specific lock */ 1640 spin_lock_irqsave(&ha->hardware_lock, flags); 1641 1642 /* Check for room in outstanding command list. */ 1643 handle = req->current_outstanding_cmd; 1644 for (index = 1; index < req->num_outstanding_cmds; index++) { 1645 handle++; 1646 if (handle == req->num_outstanding_cmds) 1647 handle = 1; 1648 if (!req->outstanding_cmds[handle]) 1649 break; 1650 } 1651 1652 if (index == req->num_outstanding_cmds) 1653 goto queuing_error; 1654 1655 /* Compute number of required data segments */ 1656 /* Map the sg table so we have an accurate count of sg entries needed */ 1657 if (scsi_sg_count(cmd)) { 1658 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), 1659 scsi_sg_count(cmd), cmd->sc_data_direction); 1660 if (unlikely(!nseg)) 1661 goto queuing_error; 1662 else 1663 sp->flags |= SRB_DMA_VALID; 1664 1665 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || 1666 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { 1667 struct qla2_sgx sgx; 1668 uint32_t partial; 1669 1670 memset(&sgx, 0, sizeof(struct qla2_sgx)); 1671 sgx.tot_bytes = scsi_bufflen(cmd); 1672 sgx.cur_sg = scsi_sglist(cmd); 1673 sgx.sp = sp; 1674 1675 nseg = 0; 1676 while (qla24xx_get_one_block_sg( 1677 cmd->device->sector_size, &sgx, &partial)) 1678 nseg++; 1679 } 1680 } else 1681 nseg = 0; 1682 1683 /* number of required data segments */ 1684 tot_dsds = nseg; 1685 1686 /* Compute number of required protection segments */ 1687 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) { 1688 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), 1689 scsi_prot_sg_count(cmd), cmd->sc_data_direction); 1690 if (unlikely(!nseg)) 1691 goto queuing_error; 1692 else 1693 sp->flags |= SRB_CRC_PROT_DMA_VALID; 1694 1695 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || 1696 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { 1697 nseg = scsi_bufflen(cmd) / cmd->device->sector_size; 1698 } 1699 } else { 1700 nseg = 0; 1701 } 1702 1703 req_cnt = 1; 1704 /* Total Data and protection sg segment(s) */ 1705 tot_prot_dsds = nseg; 1706 tot_dsds += nseg; 1707 if (req->cnt < (req_cnt + 2)) { 1708 cnt = IS_SHADOW_REG_CAPABLE(ha) ? 
*req->out_ptr : 1709 RD_REG_DWORD_RELAXED(req->req_q_out); 1710 if (req->ring_index < cnt) 1711 req->cnt = cnt - req->ring_index; 1712 else 1713 req->cnt = req->length - 1714 (req->ring_index - cnt); 1715 if (req->cnt < (req_cnt + 2)) 1716 goto queuing_error; 1717 } 1718 1719 status |= QDSS_GOT_Q_SPACE; 1720 1721 /* Build header part of command packet (excluding the OPCODE). */ 1722 req->current_outstanding_cmd = handle; 1723 req->outstanding_cmds[handle] = sp; 1724 sp->handle = handle; 1725 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 1726 req->cnt -= req_cnt; 1727 1728 /* Fill-in common area */ 1729 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr; 1730 cmd_pkt->handle = MAKE_HANDLE(req->id, handle); 1731 1732 clr_ptr = (uint32_t *)cmd_pkt + 2; 1733 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 1734 1735 /* Set NPORT-ID and LUN number*/ 1736 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 1737 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 1738 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 1739 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 1740 1741 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 1742 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 1743 1744 /* Total Data and protection segment(s) */ 1745 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 1746 1747 /* Build IOCB segments and adjust for data protection segments */ 1748 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *) 1749 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) != 1750 QLA_SUCCESS) 1751 goto queuing_error; 1752 1753 cmd_pkt->entry_count = (uint8_t)req_cnt; 1754 /* Specify response queue number where completion should happen */ 1755 cmd_pkt->entry_status = (uint8_t) rsp->id; 1756 cmd_pkt->timeout = __constant_cpu_to_le16(0); 1757 wmb(); 1758 1759 /* Adjust ring index. */ 1760 req->ring_index++; 1761 if (req->ring_index == req->length) { 1762 req->ring_index = 0; 1763 req->ring_ptr = req->ring; 1764 } else 1765 req->ring_ptr++; 1766 1767 /* Set chip new ring index. */ 1768 WRT_REG_DWORD(req->req_q_in, req->ring_index); 1769 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr); 1770 1771 /* Manage unprocessed RIO/ZIO commands in response queue. */ 1772 if (vha->flags.process_response_queue && 1773 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 1774 qla24xx_process_response_queue(vha, rsp); 1775 1776 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1777 1778 return QLA_SUCCESS; 1779 1780 queuing_error: 1781 if (status & QDSS_GOT_Q_SPACE) { 1782 req->outstanding_cmds[handle] = NULL; 1783 req->cnt += req_cnt; 1784 } 1785 /* Cleanup will be performed by the caller (queuecommand) */ 1786 1787 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1788 return QLA_FUNCTION_FAILED; 1789 } 1790 1791 1792 static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp) 1793 { 1794 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1795 struct qla_hw_data *ha = sp->fcport->vha->hw; 1796 int affinity = cmd->request->cpu; 1797 1798 if (ha->flags.cpu_affinity_enabled && affinity >= 0 && 1799 affinity < ha->max_rsp_queues - 1) 1800 *rsp = ha->rsp_q_map[affinity + 1]; 1801 else 1802 *rsp = ha->rsp_q_map[0]; 1803 } 1804 1805 /* Generic Control-SRB manipulation functions. */ 1806 1807 /* hardware_lock assumed to be held. 
 */
void *
qla2x00_alloc_iocbs_ready(scsi_qla_host_t *vha, srb_t *sp)
{
        if (qla2x00_reset_active(vha))
                return NULL;

        return qla2x00_alloc_iocbs(vha, sp);
}

void *
qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
{
        struct qla_hw_data *ha = vha->hw;
        struct req_que *req = ha->req_q_map[0];
        device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
        uint32_t index, handle;
        request_t *pkt;
        uint16_t cnt, req_cnt;

        pkt = NULL;
        req_cnt = 1;
        handle = 0;

        if (!sp)
                goto skip_cmd_array;

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == req->num_outstanding_cmds) {
                ql_log(ql_log_warn, vha, 0x700b,
                    "No room on outstanding cmd array.\n");
                goto queuing_error;
        }

        /* Prep command array. */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;

        /* Adjust entry-counts as needed. */
        if (sp->type != SRB_SCSI_CMD)
                req_cnt = sp->iocbs;

skip_cmd_array:
        /* Check for room on request queue. */
        if (req->cnt < req_cnt + 2) {
                if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
                        cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
                else if (IS_P3P_TYPE(ha))
                        cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
                else if (IS_FWI2_CAPABLE(ha))
                        cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
                else if (IS_QLAFX00(ha))
                        cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
                else
                        cnt = qla2x00_debounce_register(
                            ISP_REQ_Q_OUT(ha, &reg->isp));

                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
        }
        if (req->cnt < req_cnt + 2)
                goto queuing_error;

        /* Prep packet */
        req->cnt -= req_cnt;
        pkt = req->ring_ptr;
        memset(pkt, 0, REQUEST_ENTRY_SIZE);
        if (IS_QLAFX00(ha)) {
                WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
                WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
        } else {
                pkt->entry_count = req_cnt;
                pkt->handle = handle;
        }

queuing_error:
        return pkt;
}

static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
        struct srb_iocb *lio = &sp->u.iocb_cmd;

        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
        logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
        if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
                logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
        if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
                logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
        logio->port_id[1] = sp->fcport->d_id.b.area;
        logio->port_id[2] = sp->fcport->d_id.b.domain;
        logio->vp_index = sp->fcport->vha->vp_idx;
}

static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
        struct qla_hw_data *ha = sp->fcport->vha->hw;
        struct srb_iocb *lio = &sp->u.iocb_cmd;
        uint16_t opts;

        mbx->entry_type = MBX_IOCB_TYPE;
        SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
        mbx->mb0 =
cpu_to_le16(MBC_LOGIN_FABRIC_PORT); 1926 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0; 1927 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0; 1928 if (HAS_EXTENDED_IDS(ha)) { 1929 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id); 1930 mbx->mb10 = cpu_to_le16(opts); 1931 } else { 1932 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts); 1933 } 1934 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); 1935 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | 1936 sp->fcport->d_id.b.al_pa); 1937 mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx); 1938 } 1939 1940 static void 1941 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio) 1942 { 1943 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 1944 logio->control_flags = 1945 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); 1946 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 1947 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 1948 logio->port_id[1] = sp->fcport->d_id.b.area; 1949 logio->port_id[2] = sp->fcport->d_id.b.domain; 1950 logio->vp_index = sp->fcport->vha->vp_idx; 1951 } 1952 1953 static void 1954 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx) 1955 { 1956 struct qla_hw_data *ha = sp->fcport->vha->hw; 1957 1958 mbx->entry_type = MBX_IOCB_TYPE; 1959 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); 1960 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT); 1961 mbx->mb1 = HAS_EXTENDED_IDS(ha) ? 1962 cpu_to_le16(sp->fcport->loop_id): 1963 cpu_to_le16(sp->fcport->loop_id << 8); 1964 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); 1965 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | 1966 sp->fcport->d_id.b.al_pa); 1967 mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx); 1968 /* Implicit: mbx->mbx10 = 0. */ 1969 } 1970 1971 static void 1972 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio) 1973 { 1974 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 1975 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC); 1976 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 1977 logio->vp_index = sp->fcport->vha->vp_idx; 1978 } 1979 1980 static void 1981 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx) 1982 { 1983 struct qla_hw_data *ha = sp->fcport->vha->hw; 1984 1985 mbx->entry_type = MBX_IOCB_TYPE; 1986 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); 1987 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE); 1988 if (HAS_EXTENDED_IDS(ha)) { 1989 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id); 1990 mbx->mb10 = cpu_to_le16(BIT_0); 1991 } else { 1992 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0); 1993 } 1994 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma)); 1995 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma)); 1996 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma))); 1997 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma))); 1998 mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx); 1999 } 2000 2001 static void 2002 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) 2003 { 2004 uint32_t flags; 2005 uint64_t lun; 2006 struct fc_port *fcport = sp->fcport; 2007 scsi_qla_host_t *vha = fcport->vha; 2008 struct qla_hw_data *ha = vha->hw; 2009 struct srb_iocb *iocb = &sp->u.iocb_cmd; 2010 struct req_que *req = vha->req; 2011 2012 flags = iocb->u.tmf.flags; 2013 lun = iocb->u.tmf.lun; 2014 2015 tsk->entry_type = TSK_MGMT_IOCB_TYPE; 2016 tsk->entry_count = 1; 2017 tsk->handle = MAKE_HANDLE(req->id, tsk->handle); 2018 tsk->nport_handle = cpu_to_le16(fcport->loop_id); 2019 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 2020 tsk->control_flags = cpu_to_le32(flags); 2021 tsk->port_id[0] = 
fcport->d_id.b.al_pa; 2022 tsk->port_id[1] = fcport->d_id.b.area; 2023 tsk->port_id[2] = fcport->d_id.b.domain; 2024 tsk->vp_index = fcport->vha->vp_idx; 2025 2026 if (flags == TCF_LUN_RESET) { 2027 int_to_scsilun(lun, &tsk->lun); 2028 host_to_fcp_swap((uint8_t *)&tsk->lun, 2029 sizeof(tsk->lun)); 2030 } 2031 } 2032 2033 static void 2034 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) 2035 { 2036 struct fc_bsg_job *bsg_job = sp->u.bsg_job; 2037 2038 els_iocb->entry_type = ELS_IOCB_TYPE; 2039 els_iocb->entry_count = 1; 2040 els_iocb->sys_define = 0; 2041 els_iocb->entry_status = 0; 2042 els_iocb->handle = sp->handle; 2043 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2044 els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt); 2045 els_iocb->vp_index = sp->fcport->vha->vp_idx; 2046 els_iocb->sof_type = EST_SOFI3; 2047 els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt); 2048 2049 els_iocb->opcode = 2050 sp->type == SRB_ELS_CMD_RPT ? 2051 bsg_job->request->rqst_data.r_els.els_code : 2052 bsg_job->request->rqst_data.h_els.command_code; 2053 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; 2054 els_iocb->port_id[1] = sp->fcport->d_id.b.area; 2055 els_iocb->port_id[2] = sp->fcport->d_id.b.domain; 2056 els_iocb->control_flags = 0; 2057 els_iocb->rx_byte_count = 2058 cpu_to_le32(bsg_job->reply_payload.payload_len); 2059 els_iocb->tx_byte_count = 2060 cpu_to_le32(bsg_job->request_payload.payload_len); 2061 2062 els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address 2063 (bsg_job->request_payload.sg_list))); 2064 els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address 2065 (bsg_job->request_payload.sg_list))); 2066 els_iocb->tx_len = cpu_to_le32(sg_dma_len 2067 (bsg_job->request_payload.sg_list)); 2068 2069 els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address 2070 (bsg_job->reply_payload.sg_list))); 2071 els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address 2072 (bsg_job->reply_payload.sg_list))); 2073 els_iocb->rx_len = cpu_to_le32(sg_dma_len 2074 (bsg_job->reply_payload.sg_list)); 2075 2076 sp->fcport->vha->qla_stats.control_requests++; 2077 } 2078 2079 static void 2080 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb) 2081 { 2082 uint16_t avail_dsds; 2083 uint32_t *cur_dsd; 2084 struct scatterlist *sg; 2085 int index; 2086 uint16_t tot_dsds; 2087 scsi_qla_host_t *vha = sp->fcport->vha; 2088 struct qla_hw_data *ha = vha->hw; 2089 struct fc_bsg_job *bsg_job = sp->u.bsg_job; 2090 int loop_iterartion = 0; 2091 int cont_iocb_prsnt = 0; 2092 int entry_count = 1; 2093 2094 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t)); 2095 ct_iocb->entry_type = CT_IOCB_TYPE; 2096 ct_iocb->entry_status = 0; 2097 ct_iocb->handle1 = sp->handle; 2098 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id); 2099 ct_iocb->status = __constant_cpu_to_le16(0); 2100 ct_iocb->control_flags = __constant_cpu_to_le16(0); 2101 ct_iocb->timeout = 0; 2102 ct_iocb->cmd_dsd_count = 2103 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt); 2104 ct_iocb->total_dsd_count = 2105 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1); 2106 ct_iocb->req_bytecount = 2107 cpu_to_le32(bsg_job->request_payload.payload_len); 2108 ct_iocb->rsp_bytecount = 2109 cpu_to_le32(bsg_job->reply_payload.payload_len); 2110 2111 ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address 2112 (bsg_job->request_payload.sg_list))); 2113 ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address 2114 (bsg_job->request_payload.sg_list))); 2115 
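	/*
	 * The descriptor lengths below mirror the byte counts set above; the
	 * response descriptor that follows is filled from the reply
	 * scatter/gather list, with any additional entries spilling into
	 * Continuation Type 1 IOCBs.
	 */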
ct_iocb->dseg_req_length = ct_iocb->req_bytecount; 2116 2117 ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address 2118 (bsg_job->reply_payload.sg_list))); 2119 ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address 2120 (bsg_job->reply_payload.sg_list))); 2121 ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount; 2122 2123 avail_dsds = 1; 2124 cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address; 2125 index = 0; 2126 tot_dsds = bsg_job->reply_payload.sg_cnt; 2127 2128 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) { 2129 dma_addr_t sle_dma; 2130 cont_a64_entry_t *cont_pkt; 2131 2132 /* Allocate additional continuation packets? */ 2133 if (avail_dsds == 0) { 2134 /* 2135 * Five DSDs are available in the Cont. 2136 * Type 1 IOCB. 2137 */ 2138 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, 2139 vha->hw->req_q_map[0]); 2140 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; 2141 avail_dsds = 5; 2142 cont_iocb_prsnt = 1; 2143 entry_count++; 2144 } 2145 2146 sle_dma = sg_dma_address(sg); 2147 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 2148 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 2149 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); 2150 loop_iterartion++; 2151 avail_dsds--; 2152 } 2153 ct_iocb->entry_count = entry_count; 2154 2155 sp->fcport->vha->qla_stats.control_requests++; 2156 } 2157 2158 static void 2159 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) 2160 { 2161 uint16_t avail_dsds; 2162 uint32_t *cur_dsd; 2163 struct scatterlist *sg; 2164 int index; 2165 uint16_t tot_dsds; 2166 scsi_qla_host_t *vha = sp->fcport->vha; 2167 struct qla_hw_data *ha = vha->hw; 2168 struct fc_bsg_job *bsg_job = sp->u.bsg_job; 2169 int loop_iterartion = 0; 2170 int cont_iocb_prsnt = 0; 2171 int entry_count = 1; 2172 2173 ct_iocb->entry_type = CT_IOCB_TYPE; 2174 ct_iocb->entry_status = 0; 2175 ct_iocb->sys_define = 0; 2176 ct_iocb->handle = sp->handle; 2177 2178 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2179 ct_iocb->vp_index = sp->fcport->vha->vp_idx; 2180 ct_iocb->comp_status = __constant_cpu_to_le16(0); 2181 2182 ct_iocb->cmd_dsd_count = 2183 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt); 2184 ct_iocb->timeout = 0; 2185 ct_iocb->rsp_dsd_count = 2186 __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt); 2187 ct_iocb->rsp_byte_count = 2188 cpu_to_le32(bsg_job->reply_payload.payload_len); 2189 ct_iocb->cmd_byte_count = 2190 cpu_to_le32(bsg_job->request_payload.payload_len); 2191 ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address 2192 (bsg_job->request_payload.sg_list))); 2193 ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address 2194 (bsg_job->request_payload.sg_list))); 2195 ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len 2196 (bsg_job->request_payload.sg_list)); 2197 2198 avail_dsds = 1; 2199 cur_dsd = (uint32_t *)ct_iocb->dseg_1_address; 2200 index = 0; 2201 tot_dsds = bsg_job->reply_payload.sg_cnt; 2202 2203 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) { 2204 dma_addr_t sle_dma; 2205 cont_a64_entry_t *cont_pkt; 2206 2207 /* Allocate additional continuation packets? */ 2208 if (avail_dsds == 0) { 2209 /* 2210 * Five DSDs are available in the Cont. 2211 * Type 1 IOCB. 
2212 */ 2213 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, 2214 ha->req_q_map[0]); 2215 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; 2216 avail_dsds = 5; 2217 cont_iocb_prsnt = 1; 2218 entry_count++; 2219 } 2220 2221 sle_dma = sg_dma_address(sg); 2222 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 2223 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 2224 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); 2225 loop_iterartion++; 2226 avail_dsds--; 2227 } 2228 ct_iocb->entry_count = entry_count; 2229 } 2230 2231 /* 2232 * qla82xx_start_scsi() - Send a SCSI command to the ISP 2233 * @sp: command to send to the ISP 2234 * 2235 * Returns non-zero if a failure occurred, else zero. 2236 */ 2237 int 2238 qla82xx_start_scsi(srb_t *sp) 2239 { 2240 int ret, nseg; 2241 unsigned long flags; 2242 struct scsi_cmnd *cmd; 2243 uint32_t *clr_ptr; 2244 uint32_t index; 2245 uint32_t handle; 2246 uint16_t cnt; 2247 uint16_t req_cnt; 2248 uint16_t tot_dsds; 2249 struct device_reg_82xx __iomem *reg; 2250 uint32_t dbval; 2251 uint32_t *fcp_dl; 2252 uint8_t additional_cdb_len; 2253 struct ct6_dsd *ctx; 2254 struct scsi_qla_host *vha = sp->fcport->vha; 2255 struct qla_hw_data *ha = vha->hw; 2256 struct req_que *req = NULL; 2257 struct rsp_que *rsp = NULL; 2258 2259 /* Setup device pointers. */ 2260 ret = 0; 2261 reg = &ha->iobase->isp82; 2262 cmd = GET_CMD_SP(sp); 2263 req = vha->req; 2264 rsp = ha->rsp_q_map[0]; 2265 2266 /* So we know we haven't pci_map'ed anything yet */ 2267 tot_dsds = 0; 2268 2269 dbval = 0x04 | (ha->portnum << 5); 2270 2271 /* Send marker if required */ 2272 if (vha->marker_needed != 0) { 2273 if (qla2x00_marker(vha, req, 2274 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) { 2275 ql_log(ql_log_warn, vha, 0x300c, 2276 "qla2x00_marker failed for cmd=%p.\n", cmd); 2277 return QLA_FUNCTION_FAILED; 2278 } 2279 vha->marker_needed = 0; 2280 } 2281 2282 /* Acquire ring specific lock */ 2283 spin_lock_irqsave(&ha->hardware_lock, flags); 2284 2285 /* Check for room in outstanding command list. 
	 */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	if (tot_dsds > ql2xshiftctondsd) {
		struct cmd_type_6 *cmd_pkt;
		uint16_t more_dsd_lists = 0;
		struct dsd_dma *dsd_ptr;
		uint16_t i;

		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
			ql_dbg(ql_dbg_io, vha, 0x300d,
			    "Num of DSD list %d is more than %d for cmd=%p.\n",
			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
			    cmd);
			goto queuing_error;
		}

		if (more_dsd_lists <= ha->gbl_dsd_avail)
			goto sufficient_dsds;
		else
			more_dsd_lists -= ha->gbl_dsd_avail;

		for (i = 0; i < more_dsd_lists; i++) {
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr) {
				ql_log(ql_log_fatal, vha, 0x300e,
				    "Failed to allocate memory for dsd_dma "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}

			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
			    GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
			if (!dsd_ptr->dsd_addr) {
				kfree(dsd_ptr);
				ql_log(ql_log_fatal, vha, 0x300f,
				    "Failed to allocate memory for dsd_addr "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}
			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
			ha->gbl_dsd_avail++;
		}

sufficient_dsds:
		req_cnt = 1;

		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
			if (req->cnt < (req_cnt + 2))
				goto queuing_error;
		}

		ctx = sp->u.scmd.ctx =
		    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
		if (!ctx) {
			ql_log(ql_log_fatal, vha, 0x3010,
			    "Failed to allocate ctx for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		memset(ctx, 0, sizeof(struct ct6_dsd));
		ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
		    GFP_ATOMIC, &ctx->fcp_cmnd_dma);
		if (!ctx->fcp_cmnd) {
			ql_log(ql_log_fatal, vha, 0x3011,
			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		/* Initialize the DSD list and dma handle */
		INIT_LIST_HEAD(&ctx->dsd_list);
		ctx->dsd_use_cnt = 0;

		if (cmd->cmd_len > 16) {
			additional_cdb_len = cmd->cmd_len - 16;
			if ((cmd->cmd_len % 4) != 0) {
				/* SCSI command bigger than 16 bytes must be
				 * a multiple of 4
				 */
				ql_log(ql_log_warn, vha, 0x3012,
				    "scsi cmd len %d not multiple of 4 "
				    "for cmd=%p.\n", cmd->cmd_len, cmd);
				goto queuing_error_fcp_cmnd;
			}
			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
		} else {
			additional_cdb_len = 0;
			ctx->fcp_cmnd_len = 12 + 16 + 4;
		}

		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number */
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

		/* Build IOCB segments */
		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
			goto queuing_error_fcp_cmnd;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

		/* build FCP_CMND IU */
		memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
		int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;

		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 1;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 2;

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			ctx->fcp_cmnd->task_attribute |=
			    sp->fcport->fcp_prio << 3;

		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);

		fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
		    additional_cdb_len);
		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));

		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
		cmd_pkt->fcp_cmnd_dseg_address[0] =
		    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
		cmd_pkt->fcp_cmnd_dseg_address[1] =
		    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));

		sp->flags |= SRB_FCP_CMND_DMA_VALID;
		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify response queue number where
		 * completion should happen
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	} else {
		struct cmd_type_7 *cmd_pkt;
		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;

		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number */
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
		    sizeof(cmd_pkt->lun));

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			cmd_pkt->task |= sp->fcport->fcp_prio << 3;

		/* Load SCSI command packet.
*/ 2501 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); 2502 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); 2503 2504 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 2505 2506 /* Build IOCB segments */ 2507 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds); 2508 2509 /* Set total data segment count. */ 2510 cmd_pkt->entry_count = (uint8_t)req_cnt; 2511 /* Specify response queue number where 2512 * completion should happen. 2513 */ 2514 cmd_pkt->entry_status = (uint8_t) rsp->id; 2515 2516 } 2517 /* Build command packet. */ 2518 req->current_outstanding_cmd = handle; 2519 req->outstanding_cmds[handle] = sp; 2520 sp->handle = handle; 2521 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 2522 req->cnt -= req_cnt; 2523 wmb(); 2524 2525 /* Adjust ring index. */ 2526 req->ring_index++; 2527 if (req->ring_index == req->length) { 2528 req->ring_index = 0; 2529 req->ring_ptr = req->ring; 2530 } else 2531 req->ring_ptr++; 2532 2533 sp->flags |= SRB_DMA_VALID; 2534 2535 /* Set chip new ring index. */ 2536 /* write, read and verify logic */ 2537 dbval = dbval | (req->id << 8) | (req->ring_index << 16); 2538 if (ql2xdbwr) 2539 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval); 2540 else { 2541 WRT_REG_DWORD( 2542 (unsigned long __iomem *)ha->nxdb_wr_ptr, 2543 dbval); 2544 wmb(); 2545 while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) { 2546 WRT_REG_DWORD( 2547 (unsigned long __iomem *)ha->nxdb_wr_ptr, 2548 dbval); 2549 wmb(); 2550 } 2551 } 2552 2553 /* Manage unprocessed RIO/ZIO commands in response queue. */ 2554 if (vha->flags.process_response_queue && 2555 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 2556 qla24xx_process_response_queue(vha, rsp); 2557 2558 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2559 return QLA_SUCCESS; 2560 2561 queuing_error_fcp_cmnd: 2562 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma); 2563 queuing_error: 2564 if (tot_dsds) 2565 scsi_dma_unmap(cmd); 2566 2567 if (sp->u.scmd.ctx) { 2568 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool); 2569 sp->u.scmd.ctx = NULL; 2570 } 2571 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2572 2573 return QLA_FUNCTION_FAILED; 2574 } 2575 2576 static void 2577 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb) 2578 { 2579 struct srb_iocb *aio = &sp->u.iocb_cmd; 2580 scsi_qla_host_t *vha = sp->fcport->vha; 2581 struct req_que *req = vha->req; 2582 2583 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx)); 2584 abt_iocb->entry_type = ABORT_IOCB_TYPE; 2585 abt_iocb->entry_count = 1; 2586 abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle)); 2587 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2588 abt_iocb->handle_to_abort = 2589 cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl)); 2590 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; 2591 abt_iocb->port_id[1] = sp->fcport->d_id.b.area; 2592 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain; 2593 abt_iocb->vp_index = vha->vp_idx; 2594 abt_iocb->req_que_no = cpu_to_le16(req->id); 2595 /* Send the command to the firmware */ 2596 wmb(); 2597 } 2598 2599 int 2600 qla2x00_start_sp(srb_t *sp) 2601 { 2602 int rval; 2603 struct qla_hw_data *ha = sp->fcport->vha->hw; 2604 void *pkt; 2605 unsigned long flags; 2606 2607 rval = QLA_FUNCTION_FAILED; 2608 spin_lock_irqsave(&ha->hardware_lock, flags); 2609 pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp); 2610 if (!pkt) { 2611 ql_log(ql_log_warn, sp->fcport->vha, 0x700c, 2612 "qla2x00_alloc_iocbs failed.\n"); 2613 goto done; 2614 } 2615 2616 rval 
= QLA_SUCCESS; 2617 switch (sp->type) { 2618 case SRB_LOGIN_CMD: 2619 IS_FWI2_CAPABLE(ha) ? 2620 qla24xx_login_iocb(sp, pkt) : 2621 qla2x00_login_iocb(sp, pkt); 2622 break; 2623 case SRB_LOGOUT_CMD: 2624 IS_FWI2_CAPABLE(ha) ? 2625 qla24xx_logout_iocb(sp, pkt) : 2626 qla2x00_logout_iocb(sp, pkt); 2627 break; 2628 case SRB_ELS_CMD_RPT: 2629 case SRB_ELS_CMD_HST: 2630 qla24xx_els_iocb(sp, pkt); 2631 break; 2632 case SRB_CT_CMD: 2633 IS_FWI2_CAPABLE(ha) ? 2634 qla24xx_ct_iocb(sp, pkt) : 2635 qla2x00_ct_iocb(sp, pkt); 2636 break; 2637 case SRB_ADISC_CMD: 2638 IS_FWI2_CAPABLE(ha) ? 2639 qla24xx_adisc_iocb(sp, pkt) : 2640 qla2x00_adisc_iocb(sp, pkt); 2641 break; 2642 case SRB_TM_CMD: 2643 IS_QLAFX00(ha) ? 2644 qlafx00_tm_iocb(sp, pkt) : 2645 qla24xx_tm_iocb(sp, pkt); 2646 break; 2647 case SRB_FXIOCB_DCMD: 2648 case SRB_FXIOCB_BCMD: 2649 qlafx00_fxdisc_iocb(sp, pkt); 2650 break; 2651 case SRB_ABT_CMD: 2652 IS_QLAFX00(ha) ? 2653 qlafx00_abort_iocb(sp, pkt) : 2654 qla24xx_abort_iocb(sp, pkt); 2655 break; 2656 default: 2657 break; 2658 } 2659 2660 wmb(); 2661 qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]); 2662 done: 2663 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2664 return rval; 2665 } 2666 2667 static void 2668 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha, 2669 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds) 2670 { 2671 uint16_t avail_dsds; 2672 uint32_t *cur_dsd; 2673 uint32_t req_data_len = 0; 2674 uint32_t rsp_data_len = 0; 2675 struct scatterlist *sg; 2676 int index; 2677 int entry_count = 1; 2678 struct fc_bsg_job *bsg_job = sp->u.bsg_job; 2679 2680 /*Update entry type to indicate bidir command */ 2681 *((uint32_t *)(&cmd_pkt->entry_type)) = 2682 __constant_cpu_to_le32(COMMAND_BIDIRECTIONAL); 2683 2684 /* Set the transfer direction, in this set both flags 2685 * Also set the BD_WRAP_BACK flag, firmware will take care 2686 * assigning DID=SID for outgoing pkts. 
	 */
	cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
	cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	cmd_pkt->control_flags =
	    __constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
	    BD_WRAP_BACK);

	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
	cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
	cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);

	vha->bidi_stats.transfer_bytes += req_data_len;
	vha->bidi_stats.io_count++;

	vha->qla_stats.output_bytes += req_data_len;
	vha->qla_stats.output_requests++;

	/* Only one DSD is available in the bidirectional IOCB; the remaining
	 * DSDs are bundled in continuation IOCBs.
	 */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;

	index = 0;

	for_each_sg(bsg_job->request_payload.sg_list, sg,
	    bsg_job->request_payload.sg_cnt, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* Continuation type 1 IOCB can accommodate
			 * 5 DSDS
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* Read-request DSDs always go into a continuation IOCB and follow
	 * the write DSDs. If there is room in the current IOCB they are
	 * added there; otherwise a new continuation IOCB is allocated.
	 */
	for_each_sg(bsg_job->reply_payload.sg_list, sg,
	    bsg_job->reply_payload.sg_cnt, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* Continuation type 1 IOCB can accommodate
			 * 5 DSDS
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* This value should be the same as the number of IOCBs required for
	 * this cmd.
	 */
	cmd_pkt->entry_count = entry_count;
}

int
qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
{

	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	uint32_t handle;
	uint32_t index;
	uint16_t req_cnt;
	uint16_t cnt;
	uint32_t *clr_ptr;
	struct cmd_bidir *cmd_pkt = NULL;
	struct rsp_que *rsp;
	struct req_que *req;
	int rval = EXT_STATUS_OK;

	rval = QLA_SUCCESS;

	rsp = ha->rsp_q_map[0];
	req = vha->req;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req,
		    rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
			return EXT_STATUS_MAILBOX;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list.
*/ 2797 handle = req->current_outstanding_cmd; 2798 for (index = 1; index < req->num_outstanding_cmds; index++) { 2799 handle++; 2800 if (handle == req->num_outstanding_cmds) 2801 handle = 1; 2802 if (!req->outstanding_cmds[handle]) 2803 break; 2804 } 2805 2806 if (index == req->num_outstanding_cmds) { 2807 rval = EXT_STATUS_BUSY; 2808 goto queuing_error; 2809 } 2810 2811 /* Calculate number of IOCB required */ 2812 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); 2813 2814 /* Check for room on request queue. */ 2815 if (req->cnt < req_cnt + 2) { 2816 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : 2817 RD_REG_DWORD_RELAXED(req->req_q_out); 2818 if (req->ring_index < cnt) 2819 req->cnt = cnt - req->ring_index; 2820 else 2821 req->cnt = req->length - 2822 (req->ring_index - cnt); 2823 } 2824 if (req->cnt < req_cnt + 2) { 2825 rval = EXT_STATUS_BUSY; 2826 goto queuing_error; 2827 } 2828 2829 cmd_pkt = (struct cmd_bidir *)req->ring_ptr; 2830 cmd_pkt->handle = MAKE_HANDLE(req->id, handle); 2831 2832 /* Zero out remaining portion of packet. */ 2833 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/ 2834 clr_ptr = (uint32_t *)cmd_pkt + 2; 2835 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 2836 2837 /* Set NPORT-ID (of vha)*/ 2838 cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id); 2839 cmd_pkt->port_id[0] = vha->d_id.b.al_pa; 2840 cmd_pkt->port_id[1] = vha->d_id.b.area; 2841 cmd_pkt->port_id[2] = vha->d_id.b.domain; 2842 2843 qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds); 2844 cmd_pkt->entry_status = (uint8_t) rsp->id; 2845 /* Build command packet. */ 2846 req->current_outstanding_cmd = handle; 2847 req->outstanding_cmds[handle] = sp; 2848 sp->handle = handle; 2849 req->cnt -= req_cnt; 2850 2851 /* Send the command to the firmware */ 2852 wmb(); 2853 qla2x00_start_iocbs(vha, req); 2854 queuing_error: 2855 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2856 return rval; 2857 } 2858
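
/*
 * Illustrative sketch only (not part of the driver): a typical way an
 * SRB-based asynchronous request is queued through qla2x00_start_sp().
 * The helpers qla2x00_get_sp()/qla2x00_rel_sp() and the exact SRB fields
 * initialized here are assumptions about the surrounding driver code,
 * not definitions made in this file.
 *
 *	srb_t *sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 *
 *	if (!sp)
 *		return QLA_FUNCTION_FAILED;
 *	sp->type = SRB_ADISC_CMD;
 *	sp->name = "adisc";
 *	if (qla2x00_start_sp(sp) != QLA_SUCCESS)
 *		qla2x00_rel_sp(sp->fcport->vha, sp);
 */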