/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flags data direction.
 * @sp: SRB command to process
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
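
/*
 * Illustrative note (not from the original source): the command IOCB
 * itself holds the first few DSDs and each continuation IOCB holds the
 * rest, so the counts above are 1 + ceil((dsds - inline) / per_cont).
 *
 * Example for the 64-bit variant (2 inline DSDs, 5 per Continuation
 * Type 1 IOCB): dsds = 12 -> 1 + (10 / 5) = 3 IOCB entries.
 * Example for the 32-bit variant (3 inline DSDs, 7 per Continuation
 * Type 0 IOCB): dsds = 12 -> 1 + (9 / 7) + 1 = 3 IOCB entries.
 */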

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue to place the IOCB on
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
	    cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
	    cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}

static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t guard = scsi_host_get_guard(cmd->device->host);

	/* We always use DIFF Bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (guard & SHOST_DIX_GUARD_IP)
			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
		else
			*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(cmd);
}
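
/*
 * Usage sketch (illustrative only): qla24xx_dif_start_scsi() below calls
 * this helper both to derive the firmware protection options and to learn
 * whether protection scatter/gather entries must be DMA-mapped, e.g.:
 *
 *	uint16_t fw_prot_opts = 0;
 *
 *	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts))
 *		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
 *		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
 */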

/*
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
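
/*
 * Illustrative note (not from the original source): the two builders above
 * differ only in descriptor width.  A 32-bit DSD is two little-endian
 * words, {address, length}; a 64-bit DSD is three, {address low,
 * address high, length}, which is why the 64-bit loop emits LSD()/MSD()
 * of the DMA address before the segment length.
 */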

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		/* If still no head room then bail out */
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number*/
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
	cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
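
/*
 * Illustrative note (not from the original source) on the free-slot
 * computation used above and in the other start_scsi variants: the
 * firmware's OUT pointer (cnt) marks how far the firmware has consumed
 * the request ring, so
 *
 *	free = cnt - ring_index                  when ring_index < cnt
 *	free = length - (ring_index - cnt)       otherwise
 *
 * e.g. with length = 128, ring_index = 120 and cnt = 10 the driver may
 * still post 128 - (120 - 10) = 18 entries before catching the firmware.
 */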

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue to ring the doorbell for
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);

	if (IS_P3P_TYPE(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
		} else if (IS_QLAFX00(ha)) {
			WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint64_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24 = NULL;

	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	req = ha->req_q_map[0];
	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16((uint16_t)lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
    uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
	if (ha_locked) {
		if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	} else {
		if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	}
	vha->marker_needed = 0;

	return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint32_t *cur_dsd = NULL;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint32_t *dsd_seg;
	void *next_dsd;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 6 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return 0;
	}

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	cur_seg = scsi_sglist(cmd);
	ctx = GET_CMD_CTX_SP(sp);

	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			first_iocb = 0;
			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
		} else {
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(dsd_list_len);
		}
		cur_dsd = (uint32_t *)next_dsd;
		while (avail_dsds) {
			dma_addr_t sle_dma;

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
	return 0;
}
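
/*
 * Illustrative note (not from the original source): Command Type 6 does
 * not embed data DSDs in continuation IOCBs.  Each chunk of up to
 * QLA_DSDS_PER_IOCB segments is written into a DSD list taken from the
 * global pool, the command IOCB points at the first list, and the extra
 * slot of every list (hence the "+ 1" in dsd_list_len) either chains to
 * the next list or holds the zeroed descriptor that terminates the chain.
 */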

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}
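
/*
 * Illustrative example (not from the original source): this is a simple
 * ceiling division, dsd_lists = DIV_ROUND_UP(dsds, QLA_DSDS_PER_IOCB).
 * If QLA_DSDS_PER_IOCB were 37, then 74 segments need 2 lists and 75
 * segments need 3.
 */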

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */

	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from SCSI command
 *
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
		    pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
		    0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}
}

struct qla2_sgx {
	dma_addr_t		dma_addr;	/* OUT */
	uint32_t		dma_len;	/* OUT */

	uint32_t		tot_bytes;	/* IN */
	struct scatterlist	*cur_sg;	/* IN */

	/* for book keeping, bzero on initial invocation */
	uint32_t		bytes_consumed;
	uint32_t		num_bytes;
	uint32_t		tot_partial;

	/* for debugging */
	uint32_t		num_sg;
	srb_t			*sp;
};

static int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
    uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}
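
/*
 * Illustrative example (not from the original source): the helper above
 * re-slices the data scatter/gather list into protection-interval (blk_sz)
 * sized pieces.  With blk_sz = 512 and two SG elements of 768 and 256
 * bytes, successive calls return:
 *
 *	512 bytes from element 0	(*partial = 0, one full block)
 *	256 bytes from element 0	(*partial = 1)
 *	256 bytes from element 1	(*partial = 0, block completed)
 *
 * The *partial flag tells the caller whether a full interval has been
 * emitted, and hence whether a DIF tuple should be consumed from the
 * protection scatter/gather list.
 */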

int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;

	uint32_t prot_int; /* protection interval */
	uint32_t partial;
	struct qla2_sgx sgx;
	dma_addr_t sle_dma;
	uint32_t sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	if (sp) {
		cmd = GET_CMD_SP(sp);
		prot_int = cmd->device->sector_size;

		sgx.tot_bytes = scsi_bufflen(cmd);
		sgx.cur_sg = scsi_sglist(cmd);
		sgx.sp = sp;

		sg_prot = scsi_prot_sglist(cmd);
	} else if (tc) {
		prot_int = tc->blk_sz;
		sgx.tot_bytes = tc->bufflen;
		sgx.cur_sg = tc->sg;
		sg_prot = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				tc->ctx_dsd_alloced = 1;
			}


			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sle_dma_len);
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
	uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	uint32_t *cur_dsd = dsd;
	int i;
	uint16_t used_dsds = tot_dsds;
	struct scsi_cmnd *cmd;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_sglist(cmd);
	} else if (tc) {
		sgl = tc->sg;
	} else {
		BUG();
		return 1;
	}


	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;

	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	int i;
	struct scsi_cmnd *cmd;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	struct scsi_qla_host *vha;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_prot_sglist(cmd);
		vha = sp->fcport->vha;
	} else if (tc) {
		vha = tc->vha;
		sgl = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe021,
	    "%s: enter\n", __func__);

	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of protection segments to transfer
 * @fw_prot_opts: Protection options to be passed to the firmware
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	uint32_t *cur_dsd, *fcp_dl;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t total_bytes = 0;
	uint32_t data_bytes;
	uint32_t dif_bytes;
	uint8_t bundling = 1;
	uint16_t blk_size;
	uint8_t *clr_ptr;
	struct crc_context *crc_ctx_pkt = NULL;
	struct qla_hw_data *ha;
	uint8_t additional_fcpcdb_len;
	uint16_t fcp_cmnd_len;
	struct fcp_cmnd *fcp_cmnd;
	dma_addr_t crc_ctx_dma;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->u.scmd.ctx =
	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	fcp_cmnd->task_management = 0;
	fcp_cmnd->task_attribute = TSK_SIMPLE;

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	default:
		BUG();
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
			SCSI_PROT_DIF_TYPE2))
			fw_prot_opts |= BIT_10;
		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
		    SCSI_PROT_DIF_TYPE3)
			fw_prot_opts |= BIT_11;
	}

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
		    tot_prot_dsds);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
		    cur_dsd, tot_dsds, NULL))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
	    (tot_dsds - tot_prot_dsds), NULL))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
		    tot_prot_dsds, NULL))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}
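
/*
 * Illustrative example (not from the original source) of the length
 * bookkeeping above: for a 4096-byte transfer on a 512-byte-sector
 * device, dif_bytes = (4096 / 512) * 8 = 64.  For the *_PASS and
 * READ_STRIP/WRITE_INSERT cases the protection bytes travel on the wire,
 * so total_bytes (and hence the FCP_DL field) becomes 4096 + 64 = 4160;
 * for READ_INSERT/WRITE_STRIP the DIF exists on the host side only, so
 * the wire count stays at 4096.
 */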

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	cmd_pkt->task = TSK_SIMPLE;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	wmb();
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt = 0;
	uint16_t tot_dsds;
	uint16_t tot_prot_dsds;
	uint16_t fw_prot_opts = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct cmd_type_crc_2 *cmd_pkt;
	uint32_t status = 0;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla24xx_start_scsi(sp);
	}

	/* Setup device pointers. */

	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
	    QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	cmd_pkt->timeout = cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_FUNCTION_FAILED;
}


static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	int affinity = cmd->request->cpu;

	if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
	    affinity < ha->max_rsp_queues - 1)
		*rsp = ha->rsp_q_map[affinity + 1];
	else
		*rsp = ha->rsp_q_map[0];
}

/* Generic Control-SRB manipulation functions. */

/* hardware_lock assumed to be held. */
void *
qla2x00_alloc_iocbs_ready(scsi_qla_host_t *vha, srb_t *sp)
{
	if (qla2x00_reset_active(vha))
		return NULL;

	return qla2x00_alloc_iocbs(vha, sp);
}

void *
qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
{
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
	uint32_t index, handle;
	request_t *pkt;
	uint16_t cnt, req_cnt;

	pkt = NULL;
	req_cnt = 1;
	handle = 0;

	if (!sp)
		goto skip_cmd_array;

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x700b,
		    "No room on outstanding cmd array.\n");
		goto queuing_error;
	}

	/* Prep command array. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;

	/* Adjust entry-counts as needed. */
	if (sp->type != SRB_SCSI_CMD)
		req_cnt = sp->iocbs;

skip_cmd_array:
	/* Check for room on request queue. */
	if (req->cnt < req_cnt + 2) {
		if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
		else if (IS_P3P_TYPE(ha))
			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
		else if (IS_QLAFX00(ha))
			cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
		else
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt + 2)
		goto queuing_error;

	/* Prep packet */
	req->cnt -= req_cnt;
	pkt = req->ring_ptr;
	memset(pkt, 0, REQUEST_ENTRY_SIZE);
	if (IS_QLAFX00(ha)) {
		WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
		WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
	} else {
		pkt->entry_count = req_cnt;
		pkt->handle = handle;
	}

queuing_error:
	return pkt;
}
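
/*
 * Illustrative note (not from the original source): handle 0 is reserved
 * as the "no command" value, so the search above walks at most
 * num_outstanding_cmds - 1 slots starting just after the last handle that
 * was issued, wrapping from the end of the array back to slot 1.  The
 * same scheme is used by the start_scsi variants earlier in this file.
 */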
1902 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0; 1903 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0; 1904 if (HAS_EXTENDED_IDS(ha)) { 1905 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id); 1906 mbx->mb10 = cpu_to_le16(opts); 1907 } else { 1908 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts); 1909 } 1910 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); 1911 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | 1912 sp->fcport->d_id.b.al_pa); 1913 mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx); 1914 } 1915 1916 static void 1917 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio) 1918 { 1919 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 1920 logio->control_flags = 1921 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); 1922 if (!sp->fcport->tgt_session || 1923 !sp->fcport->tgt_session->keep_nport_handle) 1924 logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT); 1925 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 1926 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 1927 logio->port_id[1] = sp->fcport->d_id.b.area; 1928 logio->port_id[2] = sp->fcport->d_id.b.domain; 1929 logio->vp_index = sp->fcport->vha->vp_idx; 1930 } 1931 1932 static void 1933 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx) 1934 { 1935 struct qla_hw_data *ha = sp->fcport->vha->hw; 1936 1937 mbx->entry_type = MBX_IOCB_TYPE; 1938 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); 1939 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT); 1940 mbx->mb1 = HAS_EXTENDED_IDS(ha) ? 1941 cpu_to_le16(sp->fcport->loop_id): 1942 cpu_to_le16(sp->fcport->loop_id << 8); 1943 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); 1944 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | 1945 sp->fcport->d_id.b.al_pa); 1946 mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx); 1947 /* Implicit: mbx->mbx10 = 0. 
*/ 1948 } 1949 1950 static void 1951 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio) 1952 { 1953 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 1954 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC); 1955 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 1956 logio->vp_index = sp->fcport->vha->vp_idx; 1957 } 1958 1959 static void 1960 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx) 1961 { 1962 struct qla_hw_data *ha = sp->fcport->vha->hw; 1963 1964 mbx->entry_type = MBX_IOCB_TYPE; 1965 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); 1966 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE); 1967 if (HAS_EXTENDED_IDS(ha)) { 1968 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id); 1969 mbx->mb10 = cpu_to_le16(BIT_0); 1970 } else { 1971 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0); 1972 } 1973 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma)); 1974 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma)); 1975 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma))); 1976 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma))); 1977 mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx); 1978 } 1979 1980 static void 1981 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) 1982 { 1983 uint32_t flags; 1984 uint64_t lun; 1985 struct fc_port *fcport = sp->fcport; 1986 scsi_qla_host_t *vha = fcport->vha; 1987 struct qla_hw_data *ha = vha->hw; 1988 struct srb_iocb *iocb = &sp->u.iocb_cmd; 1989 struct req_que *req = vha->req; 1990 1991 flags = iocb->u.tmf.flags; 1992 lun = iocb->u.tmf.lun; 1993 1994 tsk->entry_type = TSK_MGMT_IOCB_TYPE; 1995 tsk->entry_count = 1; 1996 tsk->handle = MAKE_HANDLE(req->id, tsk->handle); 1997 tsk->nport_handle = cpu_to_le16(fcport->loop_id); 1998 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 1999 tsk->control_flags = cpu_to_le32(flags); 2000 tsk->port_id[0] = fcport->d_id.b.al_pa; 2001 tsk->port_id[1] = fcport->d_id.b.area; 2002 tsk->port_id[2] = fcport->d_id.b.domain; 2003 tsk->vp_index = fcport->vha->vp_idx; 2004 2005 if (flags == TCF_LUN_RESET) { 2006 int_to_scsilun(lun, &tsk->lun); 2007 host_to_fcp_swap((uint8_t *)&tsk->lun, 2008 sizeof(tsk->lun)); 2009 } 2010 } 2011 2012 static void 2013 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) 2014 { 2015 struct fc_bsg_job *bsg_job = sp->u.bsg_job; 2016 2017 els_iocb->entry_type = ELS_IOCB_TYPE; 2018 els_iocb->entry_count = 1; 2019 els_iocb->sys_define = 0; 2020 els_iocb->entry_status = 0; 2021 els_iocb->handle = sp->handle; 2022 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2023 els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt); 2024 els_iocb->vp_index = sp->fcport->vha->vp_idx; 2025 els_iocb->sof_type = EST_SOFI3; 2026 els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt); 2027 2028 els_iocb->opcode = 2029 sp->type == SRB_ELS_CMD_RPT ? 
2030 bsg_job->request->rqst_data.r_els.els_code : 2031 bsg_job->request->rqst_data.h_els.command_code; 2032 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; 2033 els_iocb->port_id[1] = sp->fcport->d_id.b.area; 2034 els_iocb->port_id[2] = sp->fcport->d_id.b.domain; 2035 els_iocb->control_flags = 0; 2036 els_iocb->rx_byte_count = 2037 cpu_to_le32(bsg_job->reply_payload.payload_len); 2038 els_iocb->tx_byte_count = 2039 cpu_to_le32(bsg_job->request_payload.payload_len); 2040 2041 els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address 2042 (bsg_job->request_payload.sg_list))); 2043 els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address 2044 (bsg_job->request_payload.sg_list))); 2045 els_iocb->tx_len = cpu_to_le32(sg_dma_len 2046 (bsg_job->request_payload.sg_list)); 2047 2048 els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address 2049 (bsg_job->reply_payload.sg_list))); 2050 els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address 2051 (bsg_job->reply_payload.sg_list))); 2052 els_iocb->rx_len = cpu_to_le32(sg_dma_len 2053 (bsg_job->reply_payload.sg_list)); 2054 2055 sp->fcport->vha->qla_stats.control_requests++; 2056 } 2057 2058 static void 2059 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb) 2060 { 2061 uint16_t avail_dsds; 2062 uint32_t *cur_dsd; 2063 struct scatterlist *sg; 2064 int index; 2065 uint16_t tot_dsds; 2066 scsi_qla_host_t *vha = sp->fcport->vha; 2067 struct qla_hw_data *ha = vha->hw; 2068 struct fc_bsg_job *bsg_job = sp->u.bsg_job; 2069 int loop_iterartion = 0; 2070 int entry_count = 1; 2071 2072 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t)); 2073 ct_iocb->entry_type = CT_IOCB_TYPE; 2074 ct_iocb->entry_status = 0; 2075 ct_iocb->handle1 = sp->handle; 2076 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id); 2077 ct_iocb->status = cpu_to_le16(0); 2078 ct_iocb->control_flags = cpu_to_le16(0); 2079 ct_iocb->timeout = 0; 2080 ct_iocb->cmd_dsd_count = 2081 cpu_to_le16(bsg_job->request_payload.sg_cnt); 2082 ct_iocb->total_dsd_count = 2083 cpu_to_le16(bsg_job->request_payload.sg_cnt + 1); 2084 ct_iocb->req_bytecount = 2085 cpu_to_le32(bsg_job->request_payload.payload_len); 2086 ct_iocb->rsp_bytecount = 2087 cpu_to_le32(bsg_job->reply_payload.payload_len); 2088 2089 ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address 2090 (bsg_job->request_payload.sg_list))); 2091 ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address 2092 (bsg_job->request_payload.sg_list))); 2093 ct_iocb->dseg_req_length = ct_iocb->req_bytecount; 2094 2095 ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address 2096 (bsg_job->reply_payload.sg_list))); 2097 ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address 2098 (bsg_job->reply_payload.sg_list))); 2099 ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount; 2100 2101 avail_dsds = 1; 2102 cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address; 2103 index = 0; 2104 tot_dsds = bsg_job->reply_payload.sg_cnt; 2105 2106 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) { 2107 dma_addr_t sle_dma; 2108 cont_a64_entry_t *cont_pkt; 2109 2110 /* Allocate additional continuation packets? */ 2111 if (avail_dsds == 0) { 2112 /* 2113 * Five DSDs are available in the Cont. 2114 * Type 1 IOCB. 
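* Each DSD below consumes three 32-bit words: the low and high halves of the DMA address followed by the segment length.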
2115 */ 2116 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, 2117 vha->hw->req_q_map[0]); 2118 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; 2119 avail_dsds = 5; 2120 entry_count++; 2121 } 2122 2123 sle_dma = sg_dma_address(sg); 2124 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 2125 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 2126 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); 2127 loop_iterartion++; 2128 avail_dsds--; 2129 } 2130 ct_iocb->entry_count = entry_count; 2131 2132 sp->fcport->vha->qla_stats.control_requests++; 2133 } 2134 2135 static void 2136 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) 2137 { 2138 uint16_t avail_dsds; 2139 uint32_t *cur_dsd; 2140 struct scatterlist *sg; 2141 int index; 2142 uint16_t tot_dsds; 2143 scsi_qla_host_t *vha = sp->fcport->vha; 2144 struct qla_hw_data *ha = vha->hw; 2145 struct fc_bsg_job *bsg_job = sp->u.bsg_job; 2146 int loop_iterartion = 0; 2147 int entry_count = 1; 2148 2149 ct_iocb->entry_type = CT_IOCB_TYPE; 2150 ct_iocb->entry_status = 0; 2151 ct_iocb->sys_define = 0; 2152 ct_iocb->handle = sp->handle; 2153 2154 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2155 ct_iocb->vp_index = sp->fcport->vha->vp_idx; 2156 ct_iocb->comp_status = cpu_to_le16(0); 2157 2158 ct_iocb->cmd_dsd_count = 2159 cpu_to_le16(bsg_job->request_payload.sg_cnt); 2160 ct_iocb->timeout = 0; 2161 ct_iocb->rsp_dsd_count = 2162 cpu_to_le16(bsg_job->reply_payload.sg_cnt); 2163 ct_iocb->rsp_byte_count = 2164 cpu_to_le32(bsg_job->reply_payload.payload_len); 2165 ct_iocb->cmd_byte_count = 2166 cpu_to_le32(bsg_job->request_payload.payload_len); 2167 ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address 2168 (bsg_job->request_payload.sg_list))); 2169 ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address 2170 (bsg_job->request_payload.sg_list))); 2171 ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len 2172 (bsg_job->request_payload.sg_list)); 2173 2174 avail_dsds = 1; 2175 cur_dsd = (uint32_t *)ct_iocb->dseg_1_address; 2176 index = 0; 2177 tot_dsds = bsg_job->reply_payload.sg_cnt; 2178 2179 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) { 2180 dma_addr_t sle_dma; 2181 cont_a64_entry_t *cont_pkt; 2182 2183 /* Allocate additional continuation packets? */ 2184 if (avail_dsds == 0) { 2185 /* 2186 * Five DSDs are available in the Cont. 2187 * Type 1 IOCB. 2188 */ 2189 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, 2190 ha->req_q_map[0]); 2191 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; 2192 avail_dsds = 5; 2193 entry_count++; 2194 } 2195 2196 sle_dma = sg_dma_address(sg); 2197 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 2198 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 2199 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); 2200 loop_iterartion++; 2201 avail_dsds--; 2202 } 2203 ct_iocb->entry_count = entry_count; 2204 } 2205 2206 /* 2207 * qla82xx_start_scsi() - Send a SCSI command to the ISP 2208 * @sp: command to send to the ISP 2209 * 2210 * Returns non-zero if a failure occurred, else zero. 
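* * Commands needing more data segments than ql2xshiftctondsd are sent as Command Type 6 IOCBs: the FCP_CMND IU and the DSD lists are kept in external DMA buffers and only their addresses are placed on the request ring. Smaller commands use a regular Command Type 7 IOCB with the data segments built into the ring entry and its continuations.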
2211 */ 2212 int 2213 qla82xx_start_scsi(srb_t *sp) 2214 { 2215 int nseg; 2216 unsigned long flags; 2217 struct scsi_cmnd *cmd; 2218 uint32_t *clr_ptr; 2219 uint32_t index; 2220 uint32_t handle; 2221 uint16_t cnt; 2222 uint16_t req_cnt; 2223 uint16_t tot_dsds; 2224 struct device_reg_82xx __iomem *reg; 2225 uint32_t dbval; 2226 uint32_t *fcp_dl; 2227 uint8_t additional_cdb_len; 2228 struct ct6_dsd *ctx; 2229 struct scsi_qla_host *vha = sp->fcport->vha; 2230 struct qla_hw_data *ha = vha->hw; 2231 struct req_que *req = NULL; 2232 struct rsp_que *rsp = NULL; 2233 2234 /* Setup device pointers. */ 2235 reg = &ha->iobase->isp82; 2236 cmd = GET_CMD_SP(sp); 2237 req = vha->req; 2238 rsp = ha->rsp_q_map[0]; 2239 2240 /* So we know we haven't pci_map'ed anything yet */ 2241 tot_dsds = 0; 2242 2243 dbval = 0x04 | (ha->portnum << 5); 2244 2245 /* Send marker if required */ 2246 if (vha->marker_needed != 0) { 2247 if (qla2x00_marker(vha, req, 2248 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) { 2249 ql_log(ql_log_warn, vha, 0x300c, 2250 "qla2x00_marker failed for cmd=%p.\n", cmd); 2251 return QLA_FUNCTION_FAILED; 2252 } 2253 vha->marker_needed = 0; 2254 } 2255 2256 /* Acquire ring specific lock */ 2257 spin_lock_irqsave(&ha->hardware_lock, flags); 2258 2259 /* Check for room in outstanding command list. */ 2260 handle = req->current_outstanding_cmd; 2261 for (index = 1; index < req->num_outstanding_cmds; index++) { 2262 handle++; 2263 if (handle == req->num_outstanding_cmds) 2264 handle = 1; 2265 if (!req->outstanding_cmds[handle]) 2266 break; 2267 } 2268 if (index == req->num_outstanding_cmds) 2269 goto queuing_error; 2270 2271 /* Map the sg table so we have an accurate count of sg entries needed */ 2272 if (scsi_sg_count(cmd)) { 2273 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), 2274 scsi_sg_count(cmd), cmd->sc_data_direction); 2275 if (unlikely(!nseg)) 2276 goto queuing_error; 2277 } else 2278 nseg = 0; 2279 2280 tot_dsds = nseg; 2281 2282 if (tot_dsds > ql2xshiftctondsd) { 2283 struct cmd_type_6 *cmd_pkt; 2284 uint16_t more_dsd_lists = 0; 2285 struct dsd_dma *dsd_ptr; 2286 uint16_t i; 2287 2288 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds); 2289 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) { 2290 ql_dbg(ql_dbg_io, vha, 0x300d, 2291 "Num of DSD list %d is more than %d for cmd=%p.\n", 2292 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN, 2293 cmd); 2294 goto queuing_error; 2295 } 2296 2297 if (more_dsd_lists <= ha->gbl_dsd_avail) 2298 goto sufficient_dsds; 2299 else 2300 more_dsd_lists -= ha->gbl_dsd_avail; 2301 2302 for (i = 0; i < more_dsd_lists; i++) { 2303 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); 2304 if (!dsd_ptr) { 2305 ql_log(ql_log_fatal, vha, 0x300e, 2306 "Failed to allocate memory for dsd_dma " 2307 "for cmd=%p.\n", cmd); 2308 goto queuing_error; 2309 } 2310 2311 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool, 2312 GFP_ATOMIC, &dsd_ptr->dsd_list_dma); 2313 if (!dsd_ptr->dsd_addr) { 2314 kfree(dsd_ptr); 2315 ql_log(ql_log_fatal, vha, 0x300f, 2316 "Failed to allocate memory for dsd_addr " 2317 "for cmd=%p.\n", cmd); 2318 goto queuing_error; 2319 } 2320 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list); 2321 ha->gbl_dsd_avail++; 2322 } 2323 2324 sufficient_dsds: 2325 req_cnt = 1; 2326 2327 if (req->cnt < (req_cnt + 2)) { 2328 cnt = (uint16_t)RD_REG_DWORD_RELAXED( 2329 &reg->req_q_out[0]); 2330 if (req->ring_index < cnt) 2331 req->cnt = cnt - req->ring_index; 2332 else 2333 req->cnt = req->length - 2334 (req->ring_index - cnt); 2335 if (req->cnt < (req_cnt + 2))
2336 goto queuing_error; 2337 } 2338 2339 ctx = sp->u.scmd.ctx = 2340 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); 2341 if (!ctx) { 2342 ql_log(ql_log_fatal, vha, 0x3010, 2343 "Failed to allocate ctx for cmd=%p.\n", cmd); 2344 goto queuing_error; 2345 } 2346 2347 memset(ctx, 0, sizeof(struct ct6_dsd)); 2348 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool, 2349 GFP_ATOMIC, &ctx->fcp_cmnd_dma); 2350 if (!ctx->fcp_cmnd) { 2351 ql_log(ql_log_fatal, vha, 0x3011, 2352 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd); 2353 goto queuing_error; 2354 } 2355 2356 /* Initialize the DSD list and dma handle */ 2357 INIT_LIST_HEAD(&ctx->dsd_list); 2358 ctx->dsd_use_cnt = 0; 2359 2360 if (cmd->cmd_len > 16) { 2361 additional_cdb_len = cmd->cmd_len - 16; 2362 if ((cmd->cmd_len % 4) != 0) { 2363 /* SCSI command bigger than 16 bytes must be 2364 * multiple of 4 2365 */ 2366 ql_log(ql_log_warn, vha, 0x3012, 2367 "scsi cmd len %d not multiple of 4 " 2368 "for cmd=%p.\n", cmd->cmd_len, cmd); 2369 goto queuing_error_fcp_cmnd; 2370 } 2371 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4; 2372 } else { 2373 additional_cdb_len = 0; 2374 ctx->fcp_cmnd_len = 12 + 16 + 4; 2375 } 2376 2377 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr; 2378 cmd_pkt->handle = MAKE_HANDLE(req->id, handle); 2379 2380 /* Zero out remaining portion of packet. */ 2381 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ 2382 clr_ptr = (uint32_t *)cmd_pkt + 2; 2383 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 2384 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 2385 2386 /* Set NPORT-ID and LUN number*/ 2387 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2388 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 2389 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 2390 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 2391 cmd_pkt->vp_index = sp->fcport->vha->vp_idx; 2392 2393 /* Build IOCB segments */ 2394 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds)) 2395 goto queuing_error_fcp_cmnd; 2396 2397 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 2398 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 2399 2400 /* build FCP_CMND IU */ 2401 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd)); 2402 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun); 2403 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; 2404 2405 if (cmd->sc_data_direction == DMA_TO_DEVICE) 2406 ctx->fcp_cmnd->additional_cdb_len |= 1; 2407 else if (cmd->sc_data_direction == DMA_FROM_DEVICE) 2408 ctx->fcp_cmnd->additional_cdb_len |= 2; 2409 2410 /* Populate the FCP_PRIO. */ 2411 if (ha->flags.fcp_prio_enabled) 2412 ctx->fcp_cmnd->task_attribute |= 2413 sp->fcport->fcp_prio << 3; 2414 2415 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); 2416 2417 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 + 2418 additional_cdb_len); 2419 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd)); 2420 2421 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len); 2422 cmd_pkt->fcp_cmnd_dseg_address[0] = 2423 cpu_to_le32(LSD(ctx->fcp_cmnd_dma)); 2424 cmd_pkt->fcp_cmnd_dseg_address[1] = 2425 cpu_to_le32(MSD(ctx->fcp_cmnd_dma)); 2426 2427 sp->flags |= SRB_FCP_CMND_DMA_VALID; 2428 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 2429 /* Set total data segment count. 
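* (entry_count is the number of request-ring entries the command occupies; the Type 6 path always uses a single entry because the data segments live in the external DSD lists)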
*/ 2430 cmd_pkt->entry_count = (uint8_t)req_cnt; 2431 /* Specify response queue number where 2432 * completion should happen 2433 */ 2434 cmd_pkt->entry_status = (uint8_t) rsp->id; 2435 } else { 2436 struct cmd_type_7 *cmd_pkt; 2437 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); 2438 if (req->cnt < (req_cnt + 2)) { 2439 cnt = (uint16_t)RD_REG_DWORD_RELAXED( 2440 &reg->req_q_out[0]); 2441 if (req->ring_index < cnt) 2442 req->cnt = cnt - req->ring_index; 2443 else 2444 req->cnt = req->length - 2445 (req->ring_index - cnt); 2446 } 2447 if (req->cnt < (req_cnt + 2)) 2448 goto queuing_error; 2449 2450 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; 2451 cmd_pkt->handle = MAKE_HANDLE(req->id, handle); 2452 2453 /* Zero out remaining portion of packet. */ 2454 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/ 2455 clr_ptr = (uint32_t *)cmd_pkt + 2; 2456 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 2457 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 2458 2459 /* Set NPORT-ID and LUN number*/ 2460 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2461 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 2462 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 2463 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 2464 cmd_pkt->vp_index = sp->fcport->vha->vp_idx; 2465 2466 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 2467 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, 2468 sizeof(cmd_pkt->lun)); 2469 2470 /* Populate the FCP_PRIO. */ 2471 if (ha->flags.fcp_prio_enabled) 2472 cmd_pkt->task |= sp->fcport->fcp_prio << 3; 2473 2474 /* Load SCSI command packet. */ 2475 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); 2476 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); 2477 2478 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 2479 2480 /* Build IOCB segments */ 2481 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds); 2482 2483 /* Set total data segment count. */ 2484 cmd_pkt->entry_count = (uint8_t)req_cnt; 2485 /* Specify response queue number where 2486 * completion should happen. 2487 */ 2488 cmd_pkt->entry_status = (uint8_t) rsp->id; 2489 2490 } 2491 /* Build command packet. */ 2492 req->current_outstanding_cmd = handle; 2493 req->outstanding_cmds[handle] = sp; 2494 sp->handle = handle; 2495 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 2496 req->cnt -= req_cnt; 2497 wmb(); 2498 2499 /* Adjust ring index. */ 2500 req->ring_index++; 2501 if (req->ring_index == req->length) { 2502 req->ring_index = 0; 2503 req->ring_ptr = req->ring; 2504 } else 2505 req->ring_ptr++; 2506 2507 sp->flags |= SRB_DMA_VALID; 2508 2509 /* Set chip new ring index. */ 2510 /* write, read and verify logic */ 2511 dbval = dbval | (req->id << 8) | (req->ring_index << 16); 2512 if (ql2xdbwr) 2513 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval); 2514 else { 2515 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval); 2516 wmb(); 2517 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) { 2518 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval); 2519 wmb(); 2520 } 2521 } 2522 2523 /* Manage unprocessed RIO/ZIO commands in response queue.
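* If ZIO is enabled and the firmware has already posted completions, drain them now while the hardware lock is still held rather than waiting for the next interrupt.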
*/ 2524 if (vha->flags.process_response_queue && 2525 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 2526 qla24xx_process_response_queue(vha, rsp); 2527 2528 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2529 return QLA_SUCCESS; 2530 2531 queuing_error_fcp_cmnd: 2532 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma); 2533 queuing_error: 2534 if (tot_dsds) 2535 scsi_dma_unmap(cmd); 2536 2537 if (sp->u.scmd.ctx) { 2538 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool); 2539 sp->u.scmd.ctx = NULL; 2540 } 2541 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2542 2543 return QLA_FUNCTION_FAILED; 2544 } 2545 2546 static void 2547 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb) 2548 { 2549 struct srb_iocb *aio = &sp->u.iocb_cmd; 2550 scsi_qla_host_t *vha = sp->fcport->vha; 2551 struct req_que *req = vha->req; 2552 2553 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx)); 2554 abt_iocb->entry_type = ABORT_IOCB_TYPE; 2555 abt_iocb->entry_count = 1; 2556 abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle)); 2557 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2558 abt_iocb->handle_to_abort = 2559 cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl)); 2560 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; 2561 abt_iocb->port_id[1] = sp->fcport->d_id.b.area; 2562 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain; 2563 abt_iocb->vp_index = vha->vp_idx; 2564 abt_iocb->req_que_no = cpu_to_le16(req->id); 2565 /* Send the command to the firmware */ 2566 wmb(); 2567 } 2568 2569 int 2570 qla2x00_start_sp(srb_t *sp) 2571 { 2572 int rval; 2573 struct qla_hw_data *ha = sp->fcport->vha->hw; 2574 void *pkt; 2575 unsigned long flags; 2576 2577 rval = QLA_FUNCTION_FAILED; 2578 spin_lock_irqsave(&ha->hardware_lock, flags); 2579 pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp); 2580 if (!pkt) { 2581 ql_log(ql_log_warn, sp->fcport->vha, 0x700c, 2582 "qla2x00_alloc_iocbs failed.\n"); 2583 goto done; 2584 } 2585 2586 rval = QLA_SUCCESS; 2587 switch (sp->type) { 2588 case SRB_LOGIN_CMD: 2589 IS_FWI2_CAPABLE(ha) ? 2590 qla24xx_login_iocb(sp, pkt) : 2591 qla2x00_login_iocb(sp, pkt); 2592 break; 2593 case SRB_LOGOUT_CMD: 2594 IS_FWI2_CAPABLE(ha) ? 2595 qla24xx_logout_iocb(sp, pkt) : 2596 qla2x00_logout_iocb(sp, pkt); 2597 break; 2598 case SRB_ELS_CMD_RPT: 2599 case SRB_ELS_CMD_HST: 2600 qla24xx_els_iocb(sp, pkt); 2601 break; 2602 case SRB_CT_CMD: 2603 IS_FWI2_CAPABLE(ha) ? 2604 qla24xx_ct_iocb(sp, pkt) : 2605 qla2x00_ct_iocb(sp, pkt); 2606 break; 2607 case SRB_ADISC_CMD: 2608 IS_FWI2_CAPABLE(ha) ? 2609 qla24xx_adisc_iocb(sp, pkt) : 2610 qla2x00_adisc_iocb(sp, pkt); 2611 break; 2612 case SRB_TM_CMD: 2613 IS_QLAFX00(ha) ? 2614 qlafx00_tm_iocb(sp, pkt) : 2615 qla24xx_tm_iocb(sp, pkt); 2616 break; 2617 case SRB_FXIOCB_DCMD: 2618 case SRB_FXIOCB_BCMD: 2619 qlafx00_fxdisc_iocb(sp, pkt); 2620 break; 2621 case SRB_ABT_CMD: 2622 IS_QLAFX00(ha) ? 
2623 qlafx00_abort_iocb(sp, pkt) : 2624 qla24xx_abort_iocb(sp, pkt); 2625 break; 2626 default: 2627 break; 2628 } 2629 2630 wmb(); 2631 qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]); 2632 done: 2633 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2634 return rval; 2635 } 2636 2637 static void 2638 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha, 2639 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds) 2640 { 2641 uint16_t avail_dsds; 2642 uint32_t *cur_dsd; 2643 uint32_t req_data_len = 0; 2644 uint32_t rsp_data_len = 0; 2645 struct scatterlist *sg; 2646 int index; 2647 int entry_count = 1; 2648 struct fc_bsg_job *bsg_job = sp->u.bsg_job; 2649 2650 /* Update entry type to indicate bidir command */ 2651 *((uint32_t *)(&cmd_pkt->entry_type)) = 2652 cpu_to_le32(COMMAND_BIDIRECTIONAL); 2653 2654 /* Set the transfer direction; for a bidirectional command both flags are set. 2655 * Also set the BD_WRAP_BACK flag; the firmware will take care of 2656 * assigning DID=SID for outgoing pkts. 2657 */ 2658 cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt); 2659 cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt); 2660 cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA | 2661 BD_WRAP_BACK); 2662 2663 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len; 2664 cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len); 2665 cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len); 2666 cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2); 2667 2668 vha->bidi_stats.transfer_bytes += req_data_len; 2669 vha->bidi_stats.io_count++; 2670 2671 vha->qla_stats.output_bytes += req_data_len; 2672 vha->qla_stats.output_requests++; 2673 2674 /* Only one DSD is available in the bidirectional IOCB; the remaining DSDs 2675 * are bundled in continuation IOCBs. 2676 */ 2677 avail_dsds = 1; 2678 cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address; 2679 2680 index = 0; 2681 2682 for_each_sg(bsg_job->request_payload.sg_list, sg, 2683 bsg_job->request_payload.sg_cnt, index) { 2684 dma_addr_t sle_dma; 2685 cont_a64_entry_t *cont_pkt; 2686 2687 /* Allocate additional continuation packets */ 2688 if (avail_dsds == 0) { 2689 /* Continuation Type 1 IOCB can accommodate 2690 * 5 DSDs 2691 */ 2692 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); 2693 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; 2694 avail_dsds = 5; 2695 entry_count++; 2696 } 2697 sle_dma = sg_dma_address(sg); 2698 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 2699 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 2700 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); 2701 avail_dsds--; 2702 } 2703 /* For a read request the DSDs always go to a continuation IOCB 2704 * and follow the write DSDs. If there is room on the current IOCB 2705 * they are added to that IOCB, else a new continuation IOCB is 2706 * allocated.
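* The cur_dsd pointer and avail_dsds count carry over from the write loop above, so the read segments first fill whatever space is left.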
2707 */ 2708 for_each_sg(bsg_job->reply_payload.sg_list, sg, 2709 bsg_job->reply_payload.sg_cnt, index) { 2710 dma_addr_t sle_dma; 2711 cont_a64_entry_t *cont_pkt; 2712 2713 /* Allocate additional continuation packets */ 2714 if (avail_dsds == 0) { 2715 /* Continuation Type 1 IOCB can accommodate 2716 * 5 DSDs 2717 */ 2718 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); 2719 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; 2720 avail_dsds = 5; 2721 entry_count++; 2722 } 2723 sle_dma = sg_dma_address(sg); 2724 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 2725 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 2726 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); 2727 avail_dsds--; 2728 } 2729 /* This value should be the same as the number of IOCBs required for this cmd */ 2730 cmd_pkt->entry_count = entry_count; 2731 } 2732 2733 int 2734 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds) 2735 { 2736 2737 struct qla_hw_data *ha = vha->hw; 2738 unsigned long flags; 2739 uint32_t handle; 2740 uint32_t index; 2741 uint16_t req_cnt; 2742 uint16_t cnt; 2743 uint32_t *clr_ptr; 2744 struct cmd_bidir *cmd_pkt = NULL; 2745 struct rsp_que *rsp; 2746 struct req_que *req; 2747 int rval = EXT_STATUS_OK; 2748 2749 rval = QLA_SUCCESS; 2750 2751 rsp = ha->rsp_q_map[0]; 2752 req = vha->req; 2753 2754 /* Send marker if required */ 2755 if (vha->marker_needed != 0) { 2756 if (qla2x00_marker(vha, req, 2757 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) 2758 return EXT_STATUS_MAILBOX; 2759 vha->marker_needed = 0; 2760 } 2761 2762 /* Acquire ring specific lock */ 2763 spin_lock_irqsave(&ha->hardware_lock, flags); 2764 2765 /* Check for room in outstanding command list. */ 2766 handle = req->current_outstanding_cmd; 2767 for (index = 1; index < req->num_outstanding_cmds; index++) { 2768 handle++; 2769 if (handle == req->num_outstanding_cmds) 2770 handle = 1; 2771 if (!req->outstanding_cmds[handle]) 2772 break; 2773 } 2774 2775 if (index == req->num_outstanding_cmds) { 2776 rval = EXT_STATUS_BUSY; 2777 goto queuing_error; 2778 } 2779 2780 /* Calculate the number of IOCBs required */ 2781 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); 2782 2783 /* Check for room on request queue. */ 2784 if (req->cnt < req_cnt + 2) { 2785 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : 2786 RD_REG_DWORD_RELAXED(req->req_q_out); 2787 if (req->ring_index < cnt) 2788 req->cnt = cnt - req->ring_index; 2789 else 2790 req->cnt = req->length - 2791 (req->ring_index - cnt); 2792 } 2793 if (req->cnt < req_cnt + 2) { 2794 rval = EXT_STATUS_BUSY; 2795 goto queuing_error; 2796 } 2797 2798 cmd_pkt = (struct cmd_bidir *)req->ring_ptr; 2799 cmd_pkt->handle = MAKE_HANDLE(req->id, handle); 2800 2801 /* Zero out remaining portion of packet. */ 2802 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/ 2803 clr_ptr = (uint32_t *)cmd_pkt + 2; 2804 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 2805 2806 /* Set NPORT-ID (of vha)*/ 2807 cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id); 2808 cmd_pkt->port_id[0] = vha->d_id.b.al_pa; 2809 cmd_pkt->port_id[1] = vha->d_id.b.area; 2810 cmd_pkt->port_id[2] = vha->d_id.b.domain; 2811 2812 qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds); 2813 cmd_pkt->entry_status = (uint8_t) rsp->id; 2814 /* Build command packet.
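* Record the handle so the interrupt handler can match the completion back to this srb, charge the consumed ring space, and ring the request-queue doorbell via qla2x00_start_iocbs().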
*/ 2815 req->current_outstanding_cmd = handle; 2816 req->outstanding_cmds[handle] = sp; 2817 sp->handle = handle; 2818 req->cnt -= req_cnt; 2819 2820 /* Send the command to the firmware */ 2821 wmb(); 2822 qla2x00_start_iocbs(vha, req); 2823 queuing_error: 2824 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2825 return rval; 2826 } 2827