1 /* 2 * QLogic Fibre Channel HBA Driver 3 * Copyright (c) 2003-2014 QLogic Corporation 4 * 5 * See LICENSE.qla2xxx for copyright and licensing details. 6 */ 7 #include "qla_def.h" 8 #include "qla_target.h" 9 10 #include <linux/blkdev.h> 11 #include <linux/delay.h> 12 13 #include <scsi/scsi_tcq.h> 14 15 /** 16 * qla2x00_get_cmd_direction() - Determine control_flag data direction. 17 * @sp: SCSI command 18 * 19 * Returns the proper CF_* direction based on CDB. 20 */ 21 static inline uint16_t 22 qla2x00_get_cmd_direction(srb_t *sp) 23 { 24 uint16_t cflags; 25 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 26 struct scsi_qla_host *vha = sp->vha; 27 28 cflags = 0; 29 30 /* Set transfer direction */ 31 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 32 cflags = CF_WRITE; 33 vha->qla_stats.output_bytes += scsi_bufflen(cmd); 34 vha->qla_stats.output_requests++; 35 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 36 cflags = CF_READ; 37 vha->qla_stats.input_bytes += scsi_bufflen(cmd); 38 vha->qla_stats.input_requests++; 39 } 40 return (cflags); 41 } 42 43 /** 44 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and 45 * Continuation Type 0 IOCBs to allocate. 46 * 47 * @dsds: number of data segment decriptors needed 48 * 49 * Returns the number of IOCB entries needed to store @dsds. 50 */ 51 uint16_t 52 qla2x00_calc_iocbs_32(uint16_t dsds) 53 { 54 uint16_t iocbs; 55 56 iocbs = 1; 57 if (dsds > 3) { 58 iocbs += (dsds - 3) / 7; 59 if ((dsds - 3) % 7) 60 iocbs++; 61 } 62 return (iocbs); 63 } 64 65 /** 66 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and 67 * Continuation Type 1 IOCBs to allocate. 68 * 69 * @dsds: number of data segment decriptors needed 70 * 71 * Returns the number of IOCB entries needed to store @dsds. 72 */ 73 uint16_t 74 qla2x00_calc_iocbs_64(uint16_t dsds) 75 { 76 uint16_t iocbs; 77 78 iocbs = 1; 79 if (dsds > 2) { 80 iocbs += (dsds - 2) / 5; 81 if ((dsds - 2) % 5) 82 iocbs++; 83 } 84 return (iocbs); 85 } 86 87 /** 88 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB. 89 * @vha: HA context 90 * 91 * Returns a pointer to the Continuation Type 0 IOCB packet. 92 */ 93 static inline cont_entry_t * 94 qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha) 95 { 96 cont_entry_t *cont_pkt; 97 struct req_que *req = vha->req; 98 /* Adjust ring index. */ 99 req->ring_index++; 100 if (req->ring_index == req->length) { 101 req->ring_index = 0; 102 req->ring_ptr = req->ring; 103 } else { 104 req->ring_ptr++; 105 } 106 107 cont_pkt = (cont_entry_t *)req->ring_ptr; 108 109 /* Load packet defaults. */ 110 put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type); 111 112 return (cont_pkt); 113 } 114 115 /** 116 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB. 117 * @vha: HA context 118 * @req: request queue 119 * 120 * Returns a pointer to the continuation type 1 IOCB packet. 121 */ 122 static inline cont_a64_entry_t * 123 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req) 124 { 125 cont_a64_entry_t *cont_pkt; 126 127 /* Adjust ring index. */ 128 req->ring_index++; 129 if (req->ring_index == req->length) { 130 req->ring_index = 0; 131 req->ring_ptr = req->ring; 132 } else { 133 req->ring_ptr++; 134 } 135 136 cont_pkt = (cont_a64_entry_t *)req->ring_ptr; 137 138 /* Load packet defaults. */ 139 put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 : 140 CONTINUE_A64_TYPE, &cont_pkt->entry_type); 141 142 return (cont_pkt); 143 } 144 145 inline int 146 qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts) 147 { 148 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 149 uint8_t guard = scsi_host_get_guard(cmd->device->host); 150 151 /* We always use DIFF Bundling for best performance */ 152 *fw_prot_opts = 0; 153 154 /* Translate SCSI opcode to a protection opcode */ 155 switch (scsi_get_prot_op(cmd)) { 156 case SCSI_PROT_READ_STRIP: 157 *fw_prot_opts |= PO_MODE_DIF_REMOVE; 158 break; 159 case SCSI_PROT_WRITE_INSERT: 160 *fw_prot_opts |= PO_MODE_DIF_INSERT; 161 break; 162 case SCSI_PROT_READ_INSERT: 163 *fw_prot_opts |= PO_MODE_DIF_INSERT; 164 break; 165 case SCSI_PROT_WRITE_STRIP: 166 *fw_prot_opts |= PO_MODE_DIF_REMOVE; 167 break; 168 case SCSI_PROT_READ_PASS: 169 case SCSI_PROT_WRITE_PASS: 170 if (guard & SHOST_DIX_GUARD_IP) 171 *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM; 172 else 173 *fw_prot_opts |= PO_MODE_DIF_PASS; 174 break; 175 default: /* Normal Request */ 176 *fw_prot_opts |= PO_MODE_DIF_PASS; 177 break; 178 } 179 180 return scsi_prot_sg_count(cmd); 181 } 182 183 /* 184 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit 185 * capable IOCB types. 186 * 187 * @sp: SRB command to process 188 * @cmd_pkt: Command type 2 IOCB 189 * @tot_dsds: Total number of segments to transfer 190 */ 191 void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt, 192 uint16_t tot_dsds) 193 { 194 uint16_t avail_dsds; 195 struct dsd32 *cur_dsd; 196 scsi_qla_host_t *vha; 197 struct scsi_cmnd *cmd; 198 struct scatterlist *sg; 199 int i; 200 201 cmd = GET_CMD_SP(sp); 202 203 /* Update entry type to indicate Command Type 2 IOCB */ 204 put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type); 205 206 /* No data transfer */ 207 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { 208 cmd_pkt->byte_count = cpu_to_le32(0); 209 return; 210 } 211 212 vha = sp->vha; 213 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 214 215 /* Three DSDs are available in the Command Type 2 IOCB */ 216 avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32); 217 cur_dsd = cmd_pkt->dsd32; 218 219 /* Load data segments */ 220 scsi_for_each_sg(cmd, sg, tot_dsds, i) { 221 cont_entry_t *cont_pkt; 222 223 /* Allocate additional continuation packets? */ 224 if (avail_dsds == 0) { 225 /* 226 * Seven DSDs are available in the Continuation 227 * Type 0 IOCB. 228 */ 229 cont_pkt = qla2x00_prep_cont_type0_iocb(vha); 230 cur_dsd = cont_pkt->dsd; 231 avail_dsds = ARRAY_SIZE(cont_pkt->dsd); 232 } 233 234 append_dsd32(&cur_dsd, sg); 235 avail_dsds--; 236 } 237 } 238 239 /** 240 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit 241 * capable IOCB types. 242 * 243 * @sp: SRB command to process 244 * @cmd_pkt: Command type 3 IOCB 245 * @tot_dsds: Total number of segments to transfer 246 */ 247 void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt, 248 uint16_t tot_dsds) 249 { 250 uint16_t avail_dsds; 251 struct dsd64 *cur_dsd; 252 scsi_qla_host_t *vha; 253 struct scsi_cmnd *cmd; 254 struct scatterlist *sg; 255 int i; 256 257 cmd = GET_CMD_SP(sp); 258 259 /* Update entry type to indicate Command Type 3 IOCB */ 260 put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type); 261 262 /* No data transfer */ 263 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { 264 cmd_pkt->byte_count = cpu_to_le32(0); 265 return; 266 } 267 268 vha = sp->vha; 269 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 270 271 /* Two DSDs are available in the Command Type 3 IOCB */ 272 avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64); 273 cur_dsd = cmd_pkt->dsd64; 274 275 /* Load data segments */ 276 scsi_for_each_sg(cmd, sg, tot_dsds, i) { 277 cont_a64_entry_t *cont_pkt; 278 279 /* Allocate additional continuation packets? */ 280 if (avail_dsds == 0) { 281 /* 282 * Five DSDs are available in the Continuation 283 * Type 1 IOCB. 284 */ 285 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); 286 cur_dsd = cont_pkt->dsd; 287 avail_dsds = ARRAY_SIZE(cont_pkt->dsd); 288 } 289 290 append_dsd64(&cur_dsd, sg); 291 avail_dsds--; 292 } 293 } 294 295 /* 296 * Find the first handle that is not in use, starting from 297 * req->current_outstanding_cmd + 1. The caller must hold the lock that is 298 * associated with @req. 299 */ 300 uint32_t qla2xxx_get_next_handle(struct req_que *req) 301 { 302 uint32_t index, handle = req->current_outstanding_cmd; 303 304 for (index = 1; index < req->num_outstanding_cmds; index++) { 305 handle++; 306 if (handle == req->num_outstanding_cmds) 307 handle = 1; 308 if (!req->outstanding_cmds[handle]) 309 return handle; 310 } 311 312 return 0; 313 } 314 315 /** 316 * qla2x00_start_scsi() - Send a SCSI command to the ISP 317 * @sp: command to send to the ISP 318 * 319 * Returns non-zero if a failure occurred, else zero. 320 */ 321 int 322 qla2x00_start_scsi(srb_t *sp) 323 { 324 int nseg; 325 unsigned long flags; 326 scsi_qla_host_t *vha; 327 struct scsi_cmnd *cmd; 328 uint32_t *clr_ptr; 329 uint32_t handle; 330 cmd_entry_t *cmd_pkt; 331 uint16_t cnt; 332 uint16_t req_cnt; 333 uint16_t tot_dsds; 334 struct device_reg_2xxx __iomem *reg; 335 struct qla_hw_data *ha; 336 struct req_que *req; 337 struct rsp_que *rsp; 338 339 /* Setup device pointers. */ 340 vha = sp->vha; 341 ha = vha->hw; 342 reg = &ha->iobase->isp; 343 cmd = GET_CMD_SP(sp); 344 req = ha->req_q_map[0]; 345 rsp = ha->rsp_q_map[0]; 346 /* So we know we haven't pci_map'ed anything yet */ 347 tot_dsds = 0; 348 349 /* Send marker if required */ 350 if (vha->marker_needed != 0) { 351 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) != 352 QLA_SUCCESS) { 353 return (QLA_FUNCTION_FAILED); 354 } 355 vha->marker_needed = 0; 356 } 357 358 /* Acquire ring specific lock */ 359 spin_lock_irqsave(&ha->hardware_lock, flags); 360 361 handle = qla2xxx_get_next_handle(req); 362 if (handle == 0) 363 goto queuing_error; 364 365 /* Map the sg table so we have an accurate count of sg entries needed */ 366 if (scsi_sg_count(cmd)) { 367 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), 368 scsi_sg_count(cmd), cmd->sc_data_direction); 369 if (unlikely(!nseg)) 370 goto queuing_error; 371 } else 372 nseg = 0; 373 374 tot_dsds = nseg; 375 376 /* Calculate the number of request entries needed. */ 377 req_cnt = ha->isp_ops->calc_req_entries(tot_dsds); 378 if (req->cnt < (req_cnt + 2)) { 379 cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg)); 380 if (req->ring_index < cnt) 381 req->cnt = cnt - req->ring_index; 382 else 383 req->cnt = req->length - 384 (req->ring_index - cnt); 385 /* If still no head room then bail out */ 386 if (req->cnt < (req_cnt + 2)) 387 goto queuing_error; 388 } 389 390 /* Build command packet */ 391 req->current_outstanding_cmd = handle; 392 req->outstanding_cmds[handle] = sp; 393 sp->handle = handle; 394 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 395 req->cnt -= req_cnt; 396 397 cmd_pkt = (cmd_entry_t *)req->ring_ptr; 398 cmd_pkt->handle = handle; 399 /* Zero out remaining portion of packet. */ 400 clr_ptr = (uint32_t *)cmd_pkt + 2; 401 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 402 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 403 404 /* Set target ID and LUN number*/ 405 SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id); 406 cmd_pkt->lun = cpu_to_le16(cmd->device->lun); 407 cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG); 408 409 /* Load SCSI command packet. */ 410 memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len); 411 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 412 413 /* Build IOCB segments */ 414 ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds); 415 416 /* Set total data segment count. */ 417 cmd_pkt->entry_count = (uint8_t)req_cnt; 418 wmb(); 419 420 /* Adjust ring index. */ 421 req->ring_index++; 422 if (req->ring_index == req->length) { 423 req->ring_index = 0; 424 req->ring_ptr = req->ring; 425 } else 426 req->ring_ptr++; 427 428 sp->flags |= SRB_DMA_VALID; 429 430 /* Set chip new ring index. */ 431 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index); 432 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */ 433 434 /* Manage unprocessed RIO/ZIO commands in response queue. */ 435 if (vha->flags.process_response_queue && 436 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 437 qla2x00_process_response_queue(rsp); 438 439 spin_unlock_irqrestore(&ha->hardware_lock, flags); 440 return (QLA_SUCCESS); 441 442 queuing_error: 443 if (tot_dsds) 444 scsi_dma_unmap(cmd); 445 446 spin_unlock_irqrestore(&ha->hardware_lock, flags); 447 448 return (QLA_FUNCTION_FAILED); 449 } 450 451 /** 452 * qla2x00_start_iocbs() - Execute the IOCB command 453 * @vha: HA context 454 * @req: request queue 455 */ 456 void 457 qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req) 458 { 459 struct qla_hw_data *ha = vha->hw; 460 device_reg_t *reg = ISP_QUE_REG(ha, req->id); 461 462 if (IS_P3P_TYPE(ha)) { 463 qla82xx_start_iocbs(vha); 464 } else { 465 /* Adjust ring index. */ 466 req->ring_index++; 467 if (req->ring_index == req->length) { 468 req->ring_index = 0; 469 req->ring_ptr = req->ring; 470 } else 471 req->ring_ptr++; 472 473 /* Set chip new ring index. */ 474 if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 475 WRT_REG_DWORD(req->req_q_in, req->ring_index); 476 } else if (IS_QLA83XX(ha)) { 477 WRT_REG_DWORD(req->req_q_in, req->ring_index); 478 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr); 479 } else if (IS_QLAFX00(ha)) { 480 WRT_REG_DWORD(®->ispfx00.req_q_in, req->ring_index); 481 RD_REG_DWORD_RELAXED(®->ispfx00.req_q_in); 482 QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code); 483 } else if (IS_FWI2_CAPABLE(ha)) { 484 WRT_REG_DWORD(®->isp24.req_q_in, req->ring_index); 485 RD_REG_DWORD_RELAXED(®->isp24.req_q_in); 486 } else { 487 WRT_REG_WORD(ISP_REQ_Q_IN(ha, ®->isp), 488 req->ring_index); 489 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, ®->isp)); 490 } 491 } 492 } 493 494 /** 495 * qla2x00_marker() - Send a marker IOCB to the firmware. 496 * @vha: HA context 497 * @qpair: queue pair pointer 498 * @loop_id: loop ID 499 * @lun: LUN 500 * @type: marker modifier 501 * 502 * Can be called from both normal and interrupt context. 503 * 504 * Returns non-zero if a failure occurred, else zero. 505 */ 506 static int 507 __qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair, 508 uint16_t loop_id, uint64_t lun, uint8_t type) 509 { 510 mrk_entry_t *mrk; 511 struct mrk_entry_24xx *mrk24 = NULL; 512 struct req_que *req = qpair->req; 513 struct qla_hw_data *ha = vha->hw; 514 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 515 516 mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL); 517 if (mrk == NULL) { 518 ql_log(ql_log_warn, base_vha, 0x3026, 519 "Failed to allocate Marker IOCB.\n"); 520 521 return (QLA_FUNCTION_FAILED); 522 } 523 524 mrk->entry_type = MARKER_TYPE; 525 mrk->modifier = type; 526 if (type != MK_SYNC_ALL) { 527 if (IS_FWI2_CAPABLE(ha)) { 528 mrk24 = (struct mrk_entry_24xx *) mrk; 529 mrk24->nport_handle = cpu_to_le16(loop_id); 530 int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun); 531 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun)); 532 mrk24->vp_index = vha->vp_idx; 533 mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle); 534 } else { 535 SET_TARGET_ID(ha, mrk->target, loop_id); 536 mrk->lun = cpu_to_le16((uint16_t)lun); 537 } 538 } 539 wmb(); 540 541 qla2x00_start_iocbs(vha, req); 542 543 return (QLA_SUCCESS); 544 } 545 546 int 547 qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair, 548 uint16_t loop_id, uint64_t lun, uint8_t type) 549 { 550 int ret; 551 unsigned long flags = 0; 552 553 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 554 ret = __qla2x00_marker(vha, qpair, loop_id, lun, type); 555 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 556 557 return (ret); 558 } 559 560 /* 561 * qla2x00_issue_marker 562 * 563 * Issue marker 564 * Caller CAN have hardware lock held as specified by ha_locked parameter. 565 * Might release it, then reaquire. 566 */ 567 int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked) 568 { 569 if (ha_locked) { 570 if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0, 571 MK_SYNC_ALL) != QLA_SUCCESS) 572 return QLA_FUNCTION_FAILED; 573 } else { 574 if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0, 575 MK_SYNC_ALL) != QLA_SUCCESS) 576 return QLA_FUNCTION_FAILED; 577 } 578 vha->marker_needed = 0; 579 580 return QLA_SUCCESS; 581 } 582 583 static inline int 584 qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt, 585 uint16_t tot_dsds) 586 { 587 struct dsd64 *cur_dsd = NULL, *next_dsd; 588 scsi_qla_host_t *vha; 589 struct qla_hw_data *ha; 590 struct scsi_cmnd *cmd; 591 struct scatterlist *cur_seg; 592 uint8_t avail_dsds; 593 uint8_t first_iocb = 1; 594 uint32_t dsd_list_len; 595 struct dsd_dma *dsd_ptr; 596 struct ct6_dsd *ctx; 597 598 cmd = GET_CMD_SP(sp); 599 600 /* Update entry type to indicate Command Type 3 IOCB */ 601 put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type); 602 603 /* No data transfer */ 604 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { 605 cmd_pkt->byte_count = cpu_to_le32(0); 606 return 0; 607 } 608 609 vha = sp->vha; 610 ha = vha->hw; 611 612 /* Set transfer direction */ 613 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 614 cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA); 615 vha->qla_stats.output_bytes += scsi_bufflen(cmd); 616 vha->qla_stats.output_requests++; 617 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 618 cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA); 619 vha->qla_stats.input_bytes += scsi_bufflen(cmd); 620 vha->qla_stats.input_requests++; 621 } 622 623 cur_seg = scsi_sglist(cmd); 624 ctx = sp->u.scmd.ct6_ctx; 625 626 while (tot_dsds) { 627 avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ? 628 QLA_DSDS_PER_IOCB : tot_dsds; 629 tot_dsds -= avail_dsds; 630 dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE; 631 632 dsd_ptr = list_first_entry(&ha->gbl_dsd_list, 633 struct dsd_dma, list); 634 next_dsd = dsd_ptr->dsd_addr; 635 list_del(&dsd_ptr->list); 636 ha->gbl_dsd_avail--; 637 list_add_tail(&dsd_ptr->list, &ctx->dsd_list); 638 ctx->dsd_use_cnt++; 639 ha->gbl_dsd_inuse++; 640 641 if (first_iocb) { 642 first_iocb = 0; 643 put_unaligned_le64(dsd_ptr->dsd_list_dma, 644 &cmd_pkt->fcp_dsd.address); 645 cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len); 646 } else { 647 put_unaligned_le64(dsd_ptr->dsd_list_dma, 648 &cur_dsd->address); 649 cur_dsd->length = cpu_to_le32(dsd_list_len); 650 cur_dsd++; 651 } 652 cur_dsd = next_dsd; 653 while (avail_dsds) { 654 append_dsd64(&cur_dsd, cur_seg); 655 cur_seg = sg_next(cur_seg); 656 avail_dsds--; 657 } 658 } 659 660 /* Null termination */ 661 cur_dsd->address = 0; 662 cur_dsd->length = 0; 663 cur_dsd++; 664 cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE; 665 return 0; 666 } 667 668 /* 669 * qla24xx_calc_dsd_lists() - Determine number of DSD list required 670 * for Command Type 6. 671 * 672 * @dsds: number of data segment decriptors needed 673 * 674 * Returns the number of dsd list needed to store @dsds. 675 */ 676 static inline uint16_t 677 qla24xx_calc_dsd_lists(uint16_t dsds) 678 { 679 uint16_t dsd_lists = 0; 680 681 dsd_lists = (dsds/QLA_DSDS_PER_IOCB); 682 if (dsds % QLA_DSDS_PER_IOCB) 683 dsd_lists++; 684 return dsd_lists; 685 } 686 687 688 /** 689 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7 690 * IOCB types. 691 * 692 * @sp: SRB command to process 693 * @cmd_pkt: Command type 3 IOCB 694 * @tot_dsds: Total number of segments to transfer 695 * @req: pointer to request queue 696 */ 697 inline void 698 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt, 699 uint16_t tot_dsds, struct req_que *req) 700 { 701 uint16_t avail_dsds; 702 struct dsd64 *cur_dsd; 703 scsi_qla_host_t *vha; 704 struct scsi_cmnd *cmd; 705 struct scatterlist *sg; 706 int i; 707 708 cmd = GET_CMD_SP(sp); 709 710 /* Update entry type to indicate Command Type 3 IOCB */ 711 put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type); 712 713 /* No data transfer */ 714 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { 715 cmd_pkt->byte_count = cpu_to_le32(0); 716 return; 717 } 718 719 vha = sp->vha; 720 721 /* Set transfer direction */ 722 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 723 cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA); 724 vha->qla_stats.output_bytes += scsi_bufflen(cmd); 725 vha->qla_stats.output_requests++; 726 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 727 cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA); 728 vha->qla_stats.input_bytes += scsi_bufflen(cmd); 729 vha->qla_stats.input_requests++; 730 } 731 732 /* One DSD is available in the Command Type 3 IOCB */ 733 avail_dsds = 1; 734 cur_dsd = &cmd_pkt->dsd; 735 736 /* Load data segments */ 737 738 scsi_for_each_sg(cmd, sg, tot_dsds, i) { 739 cont_a64_entry_t *cont_pkt; 740 741 /* Allocate additional continuation packets? */ 742 if (avail_dsds == 0) { 743 /* 744 * Five DSDs are available in the Continuation 745 * Type 1 IOCB. 746 */ 747 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req); 748 cur_dsd = cont_pkt->dsd; 749 avail_dsds = ARRAY_SIZE(cont_pkt->dsd); 750 } 751 752 append_dsd64(&cur_dsd, sg); 753 avail_dsds--; 754 } 755 } 756 757 struct fw_dif_context { 758 uint32_t ref_tag; 759 uint16_t app_tag; 760 uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/ 761 uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/ 762 }; 763 764 /* 765 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command 766 * 767 */ 768 static inline void 769 qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt, 770 unsigned int protcnt) 771 { 772 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 773 774 switch (scsi_get_prot_type(cmd)) { 775 case SCSI_PROT_DIF_TYPE0: 776 /* 777 * No check for ql2xenablehba_err_chk, as it would be an 778 * I/O error if hba tag generation is not done. 779 */ 780 pkt->ref_tag = cpu_to_le32((uint32_t) 781 (0xffffffff & scsi_get_lba(cmd))); 782 783 if (!qla2x00_hba_err_chk_enabled(sp)) 784 break; 785 786 pkt->ref_tag_mask[0] = 0xff; 787 pkt->ref_tag_mask[1] = 0xff; 788 pkt->ref_tag_mask[2] = 0xff; 789 pkt->ref_tag_mask[3] = 0xff; 790 break; 791 792 /* 793 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to 794 * match LBA in CDB + N 795 */ 796 case SCSI_PROT_DIF_TYPE2: 797 pkt->app_tag = cpu_to_le16(0); 798 pkt->app_tag_mask[0] = 0x0; 799 pkt->app_tag_mask[1] = 0x0; 800 801 pkt->ref_tag = cpu_to_le32((uint32_t) 802 (0xffffffff & scsi_get_lba(cmd))); 803 804 if (!qla2x00_hba_err_chk_enabled(sp)) 805 break; 806 807 /* enable ALL bytes of the ref tag */ 808 pkt->ref_tag_mask[0] = 0xff; 809 pkt->ref_tag_mask[1] = 0xff; 810 pkt->ref_tag_mask[2] = 0xff; 811 pkt->ref_tag_mask[3] = 0xff; 812 break; 813 814 /* For Type 3 protection: 16 bit GUARD only */ 815 case SCSI_PROT_DIF_TYPE3: 816 pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] = 817 pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] = 818 0x00; 819 break; 820 821 /* 822 * For TYpe 1 protection: 16 bit GUARD tag, 32 bit REF tag, and 823 * 16 bit app tag. 824 */ 825 case SCSI_PROT_DIF_TYPE1: 826 pkt->ref_tag = cpu_to_le32((uint32_t) 827 (0xffffffff & scsi_get_lba(cmd))); 828 pkt->app_tag = cpu_to_le16(0); 829 pkt->app_tag_mask[0] = 0x0; 830 pkt->app_tag_mask[1] = 0x0; 831 832 if (!qla2x00_hba_err_chk_enabled(sp)) 833 break; 834 835 /* enable ALL bytes of the ref tag */ 836 pkt->ref_tag_mask[0] = 0xff; 837 pkt->ref_tag_mask[1] = 0xff; 838 pkt->ref_tag_mask[2] = 0xff; 839 pkt->ref_tag_mask[3] = 0xff; 840 break; 841 } 842 } 843 844 int 845 qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx, 846 uint32_t *partial) 847 { 848 struct scatterlist *sg; 849 uint32_t cumulative_partial, sg_len; 850 dma_addr_t sg_dma_addr; 851 852 if (sgx->num_bytes == sgx->tot_bytes) 853 return 0; 854 855 sg = sgx->cur_sg; 856 cumulative_partial = sgx->tot_partial; 857 858 sg_dma_addr = sg_dma_address(sg); 859 sg_len = sg_dma_len(sg); 860 861 sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed; 862 863 if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) { 864 sgx->dma_len = (blk_sz - cumulative_partial); 865 sgx->tot_partial = 0; 866 sgx->num_bytes += blk_sz; 867 *partial = 0; 868 } else { 869 sgx->dma_len = sg_len - sgx->bytes_consumed; 870 sgx->tot_partial += sgx->dma_len; 871 *partial = 1; 872 } 873 874 sgx->bytes_consumed += sgx->dma_len; 875 876 if (sg_len == sgx->bytes_consumed) { 877 sg = sg_next(sg); 878 sgx->num_sg++; 879 sgx->cur_sg = sg; 880 sgx->bytes_consumed = 0; 881 } 882 883 return 1; 884 } 885 886 int 887 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, 888 struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc) 889 { 890 void *next_dsd; 891 uint8_t avail_dsds = 0; 892 uint32_t dsd_list_len; 893 struct dsd_dma *dsd_ptr; 894 struct scatterlist *sg_prot; 895 struct dsd64 *cur_dsd = dsd; 896 uint16_t used_dsds = tot_dsds; 897 uint32_t prot_int; /* protection interval */ 898 uint32_t partial; 899 struct qla2_sgx sgx; 900 dma_addr_t sle_dma; 901 uint32_t sle_dma_len, tot_prot_dma_len = 0; 902 struct scsi_cmnd *cmd; 903 904 memset(&sgx, 0, sizeof(struct qla2_sgx)); 905 if (sp) { 906 cmd = GET_CMD_SP(sp); 907 prot_int = cmd->device->sector_size; 908 909 sgx.tot_bytes = scsi_bufflen(cmd); 910 sgx.cur_sg = scsi_sglist(cmd); 911 sgx.sp = sp; 912 913 sg_prot = scsi_prot_sglist(cmd); 914 } else if (tc) { 915 prot_int = tc->blk_sz; 916 sgx.tot_bytes = tc->bufflen; 917 sgx.cur_sg = tc->sg; 918 sg_prot = tc->prot_sg; 919 } else { 920 BUG(); 921 return 1; 922 } 923 924 while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) { 925 926 sle_dma = sgx.dma_addr; 927 sle_dma_len = sgx.dma_len; 928 alloc_and_fill: 929 /* Allocate additional continuation packets? */ 930 if (avail_dsds == 0) { 931 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 932 QLA_DSDS_PER_IOCB : used_dsds; 933 dsd_list_len = (avail_dsds + 1) * 12; 934 used_dsds -= avail_dsds; 935 936 /* allocate tracking DS */ 937 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); 938 if (!dsd_ptr) 939 return 1; 940 941 /* allocate new list */ 942 dsd_ptr->dsd_addr = next_dsd = 943 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, 944 &dsd_ptr->dsd_list_dma); 945 946 if (!next_dsd) { 947 /* 948 * Need to cleanup only this dsd_ptr, rest 949 * will be done by sp_free_dma() 950 */ 951 kfree(dsd_ptr); 952 return 1; 953 } 954 955 if (sp) { 956 list_add_tail(&dsd_ptr->list, 957 &sp->u.scmd.crc_ctx->dsd_list); 958 959 sp->flags |= SRB_CRC_CTX_DSD_VALID; 960 } else { 961 list_add_tail(&dsd_ptr->list, 962 &(tc->ctx->dsd_list)); 963 *tc->ctx_dsd_alloced = 1; 964 } 965 966 967 /* add new list to cmd iocb or last list */ 968 put_unaligned_le64(dsd_ptr->dsd_list_dma, 969 &cur_dsd->address); 970 cur_dsd->length = cpu_to_le32(dsd_list_len); 971 cur_dsd = next_dsd; 972 } 973 put_unaligned_le64(sle_dma, &cur_dsd->address); 974 cur_dsd->length = cpu_to_le32(sle_dma_len); 975 cur_dsd++; 976 avail_dsds--; 977 978 if (partial == 0) { 979 /* Got a full protection interval */ 980 sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len; 981 sle_dma_len = 8; 982 983 tot_prot_dma_len += sle_dma_len; 984 if (tot_prot_dma_len == sg_dma_len(sg_prot)) { 985 tot_prot_dma_len = 0; 986 sg_prot = sg_next(sg_prot); 987 } 988 989 partial = 1; /* So as to not re-enter this block */ 990 goto alloc_and_fill; 991 } 992 } 993 /* Null termination */ 994 cur_dsd->address = 0; 995 cur_dsd->length = 0; 996 cur_dsd++; 997 return 0; 998 } 999 1000 int 1001 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, 1002 struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc) 1003 { 1004 void *next_dsd; 1005 uint8_t avail_dsds = 0; 1006 uint32_t dsd_list_len; 1007 struct dsd_dma *dsd_ptr; 1008 struct scatterlist *sg, *sgl; 1009 struct dsd64 *cur_dsd = dsd; 1010 int i; 1011 uint16_t used_dsds = tot_dsds; 1012 struct scsi_cmnd *cmd; 1013 1014 if (sp) { 1015 cmd = GET_CMD_SP(sp); 1016 sgl = scsi_sglist(cmd); 1017 } else if (tc) { 1018 sgl = tc->sg; 1019 } else { 1020 BUG(); 1021 return 1; 1022 } 1023 1024 1025 for_each_sg(sgl, sg, tot_dsds, i) { 1026 /* Allocate additional continuation packets? */ 1027 if (avail_dsds == 0) { 1028 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 1029 QLA_DSDS_PER_IOCB : used_dsds; 1030 dsd_list_len = (avail_dsds + 1) * 12; 1031 used_dsds -= avail_dsds; 1032 1033 /* allocate tracking DS */ 1034 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); 1035 if (!dsd_ptr) 1036 return 1; 1037 1038 /* allocate new list */ 1039 dsd_ptr->dsd_addr = next_dsd = 1040 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, 1041 &dsd_ptr->dsd_list_dma); 1042 1043 if (!next_dsd) { 1044 /* 1045 * Need to cleanup only this dsd_ptr, rest 1046 * will be done by sp_free_dma() 1047 */ 1048 kfree(dsd_ptr); 1049 return 1; 1050 } 1051 1052 if (sp) { 1053 list_add_tail(&dsd_ptr->list, 1054 &sp->u.scmd.crc_ctx->dsd_list); 1055 1056 sp->flags |= SRB_CRC_CTX_DSD_VALID; 1057 } else { 1058 list_add_tail(&dsd_ptr->list, 1059 &(tc->ctx->dsd_list)); 1060 *tc->ctx_dsd_alloced = 1; 1061 } 1062 1063 /* add new list to cmd iocb or last list */ 1064 put_unaligned_le64(dsd_ptr->dsd_list_dma, 1065 &cur_dsd->address); 1066 cur_dsd->length = cpu_to_le32(dsd_list_len); 1067 cur_dsd = next_dsd; 1068 } 1069 append_dsd64(&cur_dsd, sg); 1070 avail_dsds--; 1071 1072 } 1073 /* Null termination */ 1074 cur_dsd->address = 0; 1075 cur_dsd->length = 0; 1076 cur_dsd++; 1077 return 0; 1078 } 1079 1080 int 1081 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, 1082 struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc) 1083 { 1084 struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd; 1085 struct scatterlist *sg, *sgl; 1086 struct crc_context *difctx = NULL; 1087 struct scsi_qla_host *vha; 1088 uint dsd_list_len; 1089 uint avail_dsds = 0; 1090 uint used_dsds = tot_dsds; 1091 bool dif_local_dma_alloc = false; 1092 bool direction_to_device = false; 1093 int i; 1094 1095 if (sp) { 1096 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1097 1098 sgl = scsi_prot_sglist(cmd); 1099 vha = sp->vha; 1100 difctx = sp->u.scmd.crc_ctx; 1101 direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE; 1102 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021, 1103 "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n", 1104 __func__, cmd, difctx, sp); 1105 } else if (tc) { 1106 vha = tc->vha; 1107 sgl = tc->prot_sg; 1108 difctx = tc->ctx; 1109 direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE; 1110 } else { 1111 BUG(); 1112 return 1; 1113 } 1114 1115 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021, 1116 "%s: enter (write=%u)\n", __func__, direction_to_device); 1117 1118 /* if initiator doing write or target doing read */ 1119 if (direction_to_device) { 1120 for_each_sg(sgl, sg, tot_dsds, i) { 1121 u64 sle_phys = sg_phys(sg); 1122 1123 /* If SGE addr + len flips bits in upper 32-bits */ 1124 if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) { 1125 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022, 1126 "%s: page boundary crossing (phys=%llx len=%x)\n", 1127 __func__, sle_phys, sg->length); 1128 1129 if (difctx) { 1130 ha->dif_bundle_crossed_pages++; 1131 dif_local_dma_alloc = true; 1132 } else { 1133 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, 1134 vha, 0xe022, 1135 "%s: difctx pointer is NULL\n", 1136 __func__); 1137 } 1138 break; 1139 } 1140 } 1141 ha->dif_bundle_writes++; 1142 } else { 1143 ha->dif_bundle_reads++; 1144 } 1145 1146 if (ql2xdifbundlinginternalbuffers) 1147 dif_local_dma_alloc = direction_to_device; 1148 1149 if (dif_local_dma_alloc) { 1150 u32 track_difbundl_buf = 0; 1151 u32 ldma_sg_len = 0; 1152 u8 ldma_needed = 1; 1153 1154 difctx->no_dif_bundl = 0; 1155 difctx->dif_bundl_len = 0; 1156 1157 /* Track DSD buffers */ 1158 INIT_LIST_HEAD(&difctx->ldif_dsd_list); 1159 /* Track local DMA buffers */ 1160 INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list); 1161 1162 for_each_sg(sgl, sg, tot_dsds, i) { 1163 u32 sglen = sg_dma_len(sg); 1164 1165 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023, 1166 "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n", 1167 __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len, 1168 difctx->dif_bundl_len, ldma_needed); 1169 1170 while (sglen) { 1171 u32 xfrlen = 0; 1172 1173 if (ldma_needed) { 1174 /* 1175 * Allocate list item to store 1176 * the DMA buffers 1177 */ 1178 dsd_ptr = kzalloc(sizeof(*dsd_ptr), 1179 GFP_ATOMIC); 1180 if (!dsd_ptr) { 1181 ql_dbg(ql_dbg_tgt, vha, 0xe024, 1182 "%s: failed alloc dsd_ptr\n", 1183 __func__); 1184 return 1; 1185 } 1186 ha->dif_bundle_kallocs++; 1187 1188 /* allocate dma buffer */ 1189 dsd_ptr->dsd_addr = dma_pool_alloc 1190 (ha->dif_bundl_pool, GFP_ATOMIC, 1191 &dsd_ptr->dsd_list_dma); 1192 if (!dsd_ptr->dsd_addr) { 1193 ql_dbg(ql_dbg_tgt, vha, 0xe024, 1194 "%s: failed alloc ->dsd_ptr\n", 1195 __func__); 1196 /* 1197 * need to cleanup only this 1198 * dsd_ptr rest will be done 1199 * by sp_free_dma() 1200 */ 1201 kfree(dsd_ptr); 1202 ha->dif_bundle_kallocs--; 1203 return 1; 1204 } 1205 ha->dif_bundle_dma_allocs++; 1206 ldma_needed = 0; 1207 difctx->no_dif_bundl++; 1208 list_add_tail(&dsd_ptr->list, 1209 &difctx->ldif_dma_hndl_list); 1210 } 1211 1212 /* xfrlen is min of dma pool size and sglen */ 1213 xfrlen = (sglen > 1214 (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ? 1215 DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len : 1216 sglen; 1217 1218 /* replace with local allocated dma buffer */ 1219 sg_pcopy_to_buffer(sgl, sg_nents(sgl), 1220 dsd_ptr->dsd_addr + ldma_sg_len, xfrlen, 1221 difctx->dif_bundl_len); 1222 difctx->dif_bundl_len += xfrlen; 1223 sglen -= xfrlen; 1224 ldma_sg_len += xfrlen; 1225 if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE || 1226 sg_is_last(sg)) { 1227 ldma_needed = 1; 1228 ldma_sg_len = 0; 1229 } 1230 } 1231 } 1232 1233 track_difbundl_buf = used_dsds = difctx->no_dif_bundl; 1234 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025, 1235 "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n", 1236 difctx->dif_bundl_len, difctx->no_dif_bundl, 1237 track_difbundl_buf); 1238 1239 if (sp) 1240 sp->flags |= SRB_DIF_BUNDL_DMA_VALID; 1241 else 1242 tc->prot_flags = DIF_BUNDL_DMA_VALID; 1243 1244 list_for_each_entry_safe(dif_dsd, nxt_dsd, 1245 &difctx->ldif_dma_hndl_list, list) { 1246 u32 sglen = (difctx->dif_bundl_len > 1247 DIF_BUNDLING_DMA_POOL_SIZE) ? 1248 DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len; 1249 1250 BUG_ON(track_difbundl_buf == 0); 1251 1252 /* Allocate additional continuation packets? */ 1253 if (avail_dsds == 0) { 1254 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 1255 0xe024, 1256 "%s: adding continuation iocb's\n", 1257 __func__); 1258 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 1259 QLA_DSDS_PER_IOCB : used_dsds; 1260 dsd_list_len = (avail_dsds + 1) * 12; 1261 used_dsds -= avail_dsds; 1262 1263 /* allocate tracking DS */ 1264 dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC); 1265 if (!dsd_ptr) { 1266 ql_dbg(ql_dbg_tgt, vha, 0xe026, 1267 "%s: failed alloc dsd_ptr\n", 1268 __func__); 1269 return 1; 1270 } 1271 ha->dif_bundle_kallocs++; 1272 1273 difctx->no_ldif_dsd++; 1274 /* allocate new list */ 1275 dsd_ptr->dsd_addr = 1276 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, 1277 &dsd_ptr->dsd_list_dma); 1278 if (!dsd_ptr->dsd_addr) { 1279 ql_dbg(ql_dbg_tgt, vha, 0xe026, 1280 "%s: failed alloc ->dsd_addr\n", 1281 __func__); 1282 /* 1283 * need to cleanup only this dsd_ptr 1284 * rest will be done by sp_free_dma() 1285 */ 1286 kfree(dsd_ptr); 1287 ha->dif_bundle_kallocs--; 1288 return 1; 1289 } 1290 ha->dif_bundle_dma_allocs++; 1291 1292 if (sp) { 1293 list_add_tail(&dsd_ptr->list, 1294 &difctx->ldif_dsd_list); 1295 sp->flags |= SRB_CRC_CTX_DSD_VALID; 1296 } else { 1297 list_add_tail(&dsd_ptr->list, 1298 &difctx->ldif_dsd_list); 1299 tc->ctx_dsd_alloced = 1; 1300 } 1301 1302 /* add new list to cmd iocb or last list */ 1303 put_unaligned_le64(dsd_ptr->dsd_list_dma, 1304 &cur_dsd->address); 1305 cur_dsd->length = cpu_to_le32(dsd_list_len); 1306 cur_dsd = dsd_ptr->dsd_addr; 1307 } 1308 put_unaligned_le64(dif_dsd->dsd_list_dma, 1309 &cur_dsd->address); 1310 cur_dsd->length = cpu_to_le32(sglen); 1311 cur_dsd++; 1312 avail_dsds--; 1313 difctx->dif_bundl_len -= sglen; 1314 track_difbundl_buf--; 1315 } 1316 1317 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026, 1318 "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__, 1319 difctx->no_ldif_dsd, difctx->no_dif_bundl); 1320 } else { 1321 for_each_sg(sgl, sg, tot_dsds, i) { 1322 /* Allocate additional continuation packets? */ 1323 if (avail_dsds == 0) { 1324 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 1325 QLA_DSDS_PER_IOCB : used_dsds; 1326 dsd_list_len = (avail_dsds + 1) * 12; 1327 used_dsds -= avail_dsds; 1328 1329 /* allocate tracking DS */ 1330 dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC); 1331 if (!dsd_ptr) { 1332 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, 1333 vha, 0xe027, 1334 "%s: failed alloc dsd_dma...\n", 1335 __func__); 1336 return 1; 1337 } 1338 1339 /* allocate new list */ 1340 dsd_ptr->dsd_addr = 1341 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, 1342 &dsd_ptr->dsd_list_dma); 1343 if (!dsd_ptr->dsd_addr) { 1344 /* need to cleanup only this dsd_ptr */ 1345 /* rest will be done by sp_free_dma() */ 1346 kfree(dsd_ptr); 1347 return 1; 1348 } 1349 1350 if (sp) { 1351 list_add_tail(&dsd_ptr->list, 1352 &difctx->dsd_list); 1353 sp->flags |= SRB_CRC_CTX_DSD_VALID; 1354 } else { 1355 list_add_tail(&dsd_ptr->list, 1356 &difctx->dsd_list); 1357 tc->ctx_dsd_alloced = 1; 1358 } 1359 1360 /* add new list to cmd iocb or last list */ 1361 put_unaligned_le64(dsd_ptr->dsd_list_dma, 1362 &cur_dsd->address); 1363 cur_dsd->length = cpu_to_le32(dsd_list_len); 1364 cur_dsd = dsd_ptr->dsd_addr; 1365 } 1366 append_dsd64(&cur_dsd, sg); 1367 avail_dsds--; 1368 } 1369 } 1370 /* Null termination */ 1371 cur_dsd->address = 0; 1372 cur_dsd->length = 0; 1373 cur_dsd++; 1374 return 0; 1375 } 1376 1377 /** 1378 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command 1379 * Type 6 IOCB types. 1380 * 1381 * @sp: SRB command to process 1382 * @cmd_pkt: Command type 3 IOCB 1383 * @tot_dsds: Total number of segments to transfer 1384 * @tot_prot_dsds: Total number of segments with protection information 1385 * @fw_prot_opts: Protection options to be passed to firmware 1386 */ 1387 static inline int 1388 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, 1389 uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts) 1390 { 1391 struct dsd64 *cur_dsd; 1392 uint32_t *fcp_dl; 1393 scsi_qla_host_t *vha; 1394 struct scsi_cmnd *cmd; 1395 uint32_t total_bytes = 0; 1396 uint32_t data_bytes; 1397 uint32_t dif_bytes; 1398 uint8_t bundling = 1; 1399 uint16_t blk_size; 1400 struct crc_context *crc_ctx_pkt = NULL; 1401 struct qla_hw_data *ha; 1402 uint8_t additional_fcpcdb_len; 1403 uint16_t fcp_cmnd_len; 1404 struct fcp_cmnd *fcp_cmnd; 1405 dma_addr_t crc_ctx_dma; 1406 1407 cmd = GET_CMD_SP(sp); 1408 1409 /* Update entry type to indicate Command Type CRC_2 IOCB */ 1410 put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type); 1411 1412 vha = sp->vha; 1413 ha = vha->hw; 1414 1415 /* No data transfer */ 1416 data_bytes = scsi_bufflen(cmd); 1417 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) { 1418 cmd_pkt->byte_count = cpu_to_le32(0); 1419 return QLA_SUCCESS; 1420 } 1421 1422 cmd_pkt->vp_index = sp->vha->vp_idx; 1423 1424 /* Set transfer direction */ 1425 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 1426 cmd_pkt->control_flags = 1427 cpu_to_le16(CF_WRITE_DATA); 1428 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 1429 cmd_pkt->control_flags = 1430 cpu_to_le16(CF_READ_DATA); 1431 } 1432 1433 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || 1434 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) || 1435 (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) || 1436 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT)) 1437 bundling = 0; 1438 1439 /* Allocate CRC context from global pool */ 1440 crc_ctx_pkt = sp->u.scmd.crc_ctx = 1441 dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma); 1442 1443 if (!crc_ctx_pkt) 1444 goto crc_queuing_error; 1445 1446 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma; 1447 1448 sp->flags |= SRB_CRC_CTX_DMA_VALID; 1449 1450 /* Set handle */ 1451 crc_ctx_pkt->handle = cmd_pkt->handle; 1452 1453 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list); 1454 1455 qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *) 1456 &crc_ctx_pkt->ref_tag, tot_prot_dsds); 1457 1458 put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address); 1459 cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW; 1460 1461 /* Determine SCSI command length -- align to 4 byte boundary */ 1462 if (cmd->cmd_len > 16) { 1463 additional_fcpcdb_len = cmd->cmd_len - 16; 1464 if ((cmd->cmd_len % 4) != 0) { 1465 /* SCSI cmd > 16 bytes must be multiple of 4 */ 1466 goto crc_queuing_error; 1467 } 1468 fcp_cmnd_len = 12 + cmd->cmd_len + 4; 1469 } else { 1470 additional_fcpcdb_len = 0; 1471 fcp_cmnd_len = 12 + 16 + 4; 1472 } 1473 1474 fcp_cmnd = &crc_ctx_pkt->fcp_cmnd; 1475 1476 fcp_cmnd->additional_cdb_len = additional_fcpcdb_len; 1477 if (cmd->sc_data_direction == DMA_TO_DEVICE) 1478 fcp_cmnd->additional_cdb_len |= 1; 1479 else if (cmd->sc_data_direction == DMA_FROM_DEVICE) 1480 fcp_cmnd->additional_cdb_len |= 2; 1481 1482 int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun); 1483 memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); 1484 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len); 1485 put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF, 1486 &cmd_pkt->fcp_cmnd_dseg_address); 1487 fcp_cmnd->task_management = 0; 1488 fcp_cmnd->task_attribute = TSK_SIMPLE; 1489 1490 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ 1491 1492 /* Compute dif len and adjust data len to incude protection */ 1493 dif_bytes = 0; 1494 blk_size = cmd->device->sector_size; 1495 dif_bytes = (data_bytes / blk_size) * 8; 1496 1497 switch (scsi_get_prot_op(GET_CMD_SP(sp))) { 1498 case SCSI_PROT_READ_INSERT: 1499 case SCSI_PROT_WRITE_STRIP: 1500 total_bytes = data_bytes; 1501 data_bytes += dif_bytes; 1502 break; 1503 1504 case SCSI_PROT_READ_STRIP: 1505 case SCSI_PROT_WRITE_INSERT: 1506 case SCSI_PROT_READ_PASS: 1507 case SCSI_PROT_WRITE_PASS: 1508 total_bytes = data_bytes + dif_bytes; 1509 break; 1510 default: 1511 BUG(); 1512 } 1513 1514 if (!qla2x00_hba_err_chk_enabled(sp)) 1515 fw_prot_opts |= 0x10; /* Disable Guard tag checking */ 1516 /* HBA error checking enabled */ 1517 else if (IS_PI_UNINIT_CAPABLE(ha)) { 1518 if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1) 1519 || (scsi_get_prot_type(GET_CMD_SP(sp)) == 1520 SCSI_PROT_DIF_TYPE2)) 1521 fw_prot_opts |= BIT_10; 1522 else if (scsi_get_prot_type(GET_CMD_SP(sp)) == 1523 SCSI_PROT_DIF_TYPE3) 1524 fw_prot_opts |= BIT_11; 1525 } 1526 1527 if (!bundling) { 1528 cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0]; 1529 } else { 1530 /* 1531 * Configure Bundling if we need to fetch interlaving 1532 * protection PCI accesses 1533 */ 1534 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING; 1535 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes); 1536 crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds - 1537 tot_prot_dsds); 1538 cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0]; 1539 } 1540 1541 /* Finish the common fields of CRC pkt */ 1542 crc_ctx_pkt->blk_size = cpu_to_le16(blk_size); 1543 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts); 1544 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes); 1545 crc_ctx_pkt->guard_seed = cpu_to_le16(0); 1546 /* Fibre channel byte count */ 1547 cmd_pkt->byte_count = cpu_to_le32(total_bytes); 1548 fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 + 1549 additional_fcpcdb_len); 1550 *fcp_dl = htonl(total_bytes); 1551 1552 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) { 1553 cmd_pkt->byte_count = cpu_to_le32(0); 1554 return QLA_SUCCESS; 1555 } 1556 /* Walks data segments */ 1557 1558 cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE); 1559 1560 if (!bundling && tot_prot_dsds) { 1561 if (qla24xx_walk_and_build_sglist_no_difb(ha, sp, 1562 cur_dsd, tot_dsds, NULL)) 1563 goto crc_queuing_error; 1564 } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, 1565 (tot_dsds - tot_prot_dsds), NULL)) 1566 goto crc_queuing_error; 1567 1568 if (bundling && tot_prot_dsds) { 1569 /* Walks dif segments */ 1570 cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE); 1571 cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd; 1572 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd, 1573 tot_prot_dsds, NULL)) 1574 goto crc_queuing_error; 1575 } 1576 return QLA_SUCCESS; 1577 1578 crc_queuing_error: 1579 /* Cleanup will be performed by the caller */ 1580 1581 return QLA_FUNCTION_FAILED; 1582 } 1583 1584 /** 1585 * qla24xx_start_scsi() - Send a SCSI command to the ISP 1586 * @sp: command to send to the ISP 1587 * 1588 * Returns non-zero if a failure occurred, else zero. 1589 */ 1590 int 1591 qla24xx_start_scsi(srb_t *sp) 1592 { 1593 int nseg; 1594 unsigned long flags; 1595 uint32_t *clr_ptr; 1596 uint32_t handle; 1597 struct cmd_type_7 *cmd_pkt; 1598 uint16_t cnt; 1599 uint16_t req_cnt; 1600 uint16_t tot_dsds; 1601 struct req_que *req = NULL; 1602 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1603 struct scsi_qla_host *vha = sp->vha; 1604 struct qla_hw_data *ha = vha->hw; 1605 1606 /* Setup device pointers. */ 1607 req = vha->req; 1608 1609 /* So we know we haven't pci_map'ed anything yet */ 1610 tot_dsds = 0; 1611 1612 /* Send marker if required */ 1613 if (vha->marker_needed != 0) { 1614 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) != 1615 QLA_SUCCESS) 1616 return QLA_FUNCTION_FAILED; 1617 vha->marker_needed = 0; 1618 } 1619 1620 /* Acquire ring specific lock */ 1621 spin_lock_irqsave(&ha->hardware_lock, flags); 1622 1623 handle = qla2xxx_get_next_handle(req); 1624 if (handle == 0) 1625 goto queuing_error; 1626 1627 /* Map the sg table so we have an accurate count of sg entries needed */ 1628 if (scsi_sg_count(cmd)) { 1629 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), 1630 scsi_sg_count(cmd), cmd->sc_data_direction); 1631 if (unlikely(!nseg)) 1632 goto queuing_error; 1633 } else 1634 nseg = 0; 1635 1636 tot_dsds = nseg; 1637 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); 1638 if (req->cnt < (req_cnt + 2)) { 1639 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : 1640 RD_REG_DWORD_RELAXED(req->req_q_out); 1641 if (req->ring_index < cnt) 1642 req->cnt = cnt - req->ring_index; 1643 else 1644 req->cnt = req->length - 1645 (req->ring_index - cnt); 1646 if (req->cnt < (req_cnt + 2)) 1647 goto queuing_error; 1648 } 1649 1650 /* Build command packet. */ 1651 req->current_outstanding_cmd = handle; 1652 req->outstanding_cmds[handle] = sp; 1653 sp->handle = handle; 1654 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 1655 req->cnt -= req_cnt; 1656 1657 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; 1658 cmd_pkt->handle = MAKE_HANDLE(req->id, handle); 1659 1660 /* Zero out remaining portion of packet. */ 1661 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ 1662 clr_ptr = (uint32_t *)cmd_pkt + 2; 1663 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 1664 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 1665 1666 /* Set NPORT-ID and LUN number*/ 1667 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 1668 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 1669 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 1670 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 1671 cmd_pkt->vp_index = sp->vha->vp_idx; 1672 1673 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 1674 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 1675 1676 cmd_pkt->task = TSK_SIMPLE; 1677 1678 /* Load SCSI command packet. */ 1679 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); 1680 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); 1681 1682 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 1683 1684 /* Build IOCB segments */ 1685 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req); 1686 1687 /* Set total data segment count. */ 1688 cmd_pkt->entry_count = (uint8_t)req_cnt; 1689 wmb(); 1690 /* Adjust ring index. */ 1691 req->ring_index++; 1692 if (req->ring_index == req->length) { 1693 req->ring_index = 0; 1694 req->ring_ptr = req->ring; 1695 } else 1696 req->ring_ptr++; 1697 1698 sp->flags |= SRB_DMA_VALID; 1699 1700 /* Set chip new ring index. */ 1701 WRT_REG_DWORD(req->req_q_in, req->ring_index); 1702 1703 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1704 return QLA_SUCCESS; 1705 1706 queuing_error: 1707 if (tot_dsds) 1708 scsi_dma_unmap(cmd); 1709 1710 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1711 1712 return QLA_FUNCTION_FAILED; 1713 } 1714 1715 /** 1716 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP 1717 * @sp: command to send to the ISP 1718 * 1719 * Returns non-zero if a failure occurred, else zero. 1720 */ 1721 int 1722 qla24xx_dif_start_scsi(srb_t *sp) 1723 { 1724 int nseg; 1725 unsigned long flags; 1726 uint32_t *clr_ptr; 1727 uint32_t handle; 1728 uint16_t cnt; 1729 uint16_t req_cnt = 0; 1730 uint16_t tot_dsds; 1731 uint16_t tot_prot_dsds; 1732 uint16_t fw_prot_opts = 0; 1733 struct req_que *req = NULL; 1734 struct rsp_que *rsp = NULL; 1735 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1736 struct scsi_qla_host *vha = sp->vha; 1737 struct qla_hw_data *ha = vha->hw; 1738 struct cmd_type_crc_2 *cmd_pkt; 1739 uint32_t status = 0; 1740 1741 #define QDSS_GOT_Q_SPACE BIT_0 1742 1743 /* Only process protection or >16 cdb in this routine */ 1744 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) { 1745 if (cmd->cmd_len <= 16) 1746 return qla24xx_start_scsi(sp); 1747 } 1748 1749 /* Setup device pointers. */ 1750 req = vha->req; 1751 rsp = req->rsp; 1752 1753 /* So we know we haven't pci_map'ed anything yet */ 1754 tot_dsds = 0; 1755 1756 /* Send marker if required */ 1757 if (vha->marker_needed != 0) { 1758 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) != 1759 QLA_SUCCESS) 1760 return QLA_FUNCTION_FAILED; 1761 vha->marker_needed = 0; 1762 } 1763 1764 /* Acquire ring specific lock */ 1765 spin_lock_irqsave(&ha->hardware_lock, flags); 1766 1767 handle = qla2xxx_get_next_handle(req); 1768 if (handle == 0) 1769 goto queuing_error; 1770 1771 /* Compute number of required data segments */ 1772 /* Map the sg table so we have an accurate count of sg entries needed */ 1773 if (scsi_sg_count(cmd)) { 1774 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), 1775 scsi_sg_count(cmd), cmd->sc_data_direction); 1776 if (unlikely(!nseg)) 1777 goto queuing_error; 1778 else 1779 sp->flags |= SRB_DMA_VALID; 1780 1781 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || 1782 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { 1783 struct qla2_sgx sgx; 1784 uint32_t partial; 1785 1786 memset(&sgx, 0, sizeof(struct qla2_sgx)); 1787 sgx.tot_bytes = scsi_bufflen(cmd); 1788 sgx.cur_sg = scsi_sglist(cmd); 1789 sgx.sp = sp; 1790 1791 nseg = 0; 1792 while (qla24xx_get_one_block_sg( 1793 cmd->device->sector_size, &sgx, &partial)) 1794 nseg++; 1795 } 1796 } else 1797 nseg = 0; 1798 1799 /* number of required data segments */ 1800 tot_dsds = nseg; 1801 1802 /* Compute number of required protection segments */ 1803 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) { 1804 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), 1805 scsi_prot_sg_count(cmd), cmd->sc_data_direction); 1806 if (unlikely(!nseg)) 1807 goto queuing_error; 1808 else 1809 sp->flags |= SRB_CRC_PROT_DMA_VALID; 1810 1811 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || 1812 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { 1813 nseg = scsi_bufflen(cmd) / cmd->device->sector_size; 1814 } 1815 } else { 1816 nseg = 0; 1817 } 1818 1819 req_cnt = 1; 1820 /* Total Data and protection sg segment(s) */ 1821 tot_prot_dsds = nseg; 1822 tot_dsds += nseg; 1823 if (req->cnt < (req_cnt + 2)) { 1824 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : 1825 RD_REG_DWORD_RELAXED(req->req_q_out); 1826 if (req->ring_index < cnt) 1827 req->cnt = cnt - req->ring_index; 1828 else 1829 req->cnt = req->length - 1830 (req->ring_index - cnt); 1831 if (req->cnt < (req_cnt + 2)) 1832 goto queuing_error; 1833 } 1834 1835 status |= QDSS_GOT_Q_SPACE; 1836 1837 /* Build header part of command packet (excluding the OPCODE). */ 1838 req->current_outstanding_cmd = handle; 1839 req->outstanding_cmds[handle] = sp; 1840 sp->handle = handle; 1841 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 1842 req->cnt -= req_cnt; 1843 1844 /* Fill-in common area */ 1845 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr; 1846 cmd_pkt->handle = MAKE_HANDLE(req->id, handle); 1847 1848 clr_ptr = (uint32_t *)cmd_pkt + 2; 1849 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 1850 1851 /* Set NPORT-ID and LUN number*/ 1852 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 1853 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 1854 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 1855 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 1856 1857 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 1858 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 1859 1860 /* Total Data and protection segment(s) */ 1861 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 1862 1863 /* Build IOCB segments and adjust for data protection segments */ 1864 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *) 1865 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) != 1866 QLA_SUCCESS) 1867 goto queuing_error; 1868 1869 cmd_pkt->entry_count = (uint8_t)req_cnt; 1870 /* Specify response queue number where completion should happen */ 1871 cmd_pkt->entry_status = (uint8_t) rsp->id; 1872 cmd_pkt->timeout = cpu_to_le16(0); 1873 wmb(); 1874 1875 /* Adjust ring index. */ 1876 req->ring_index++; 1877 if (req->ring_index == req->length) { 1878 req->ring_index = 0; 1879 req->ring_ptr = req->ring; 1880 } else 1881 req->ring_ptr++; 1882 1883 /* Set chip new ring index. */ 1884 WRT_REG_DWORD(req->req_q_in, req->ring_index); 1885 1886 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1887 1888 return QLA_SUCCESS; 1889 1890 queuing_error: 1891 if (status & QDSS_GOT_Q_SPACE) { 1892 req->outstanding_cmds[handle] = NULL; 1893 req->cnt += req_cnt; 1894 } 1895 /* Cleanup will be performed by the caller (queuecommand) */ 1896 1897 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1898 return QLA_FUNCTION_FAILED; 1899 } 1900 1901 /** 1902 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP 1903 * @sp: command to send to the ISP 1904 * 1905 * Returns non-zero if a failure occurred, else zero. 1906 */ 1907 static int 1908 qla2xxx_start_scsi_mq(srb_t *sp) 1909 { 1910 int nseg; 1911 unsigned long flags; 1912 uint32_t *clr_ptr; 1913 uint32_t handle; 1914 struct cmd_type_7 *cmd_pkt; 1915 uint16_t cnt; 1916 uint16_t req_cnt; 1917 uint16_t tot_dsds; 1918 struct req_que *req = NULL; 1919 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1920 struct scsi_qla_host *vha = sp->fcport->vha; 1921 struct qla_hw_data *ha = vha->hw; 1922 struct qla_qpair *qpair = sp->qpair; 1923 1924 /* Acquire qpair specific lock */ 1925 spin_lock_irqsave(&qpair->qp_lock, flags); 1926 1927 /* Setup qpair pointers */ 1928 req = qpair->req; 1929 1930 /* So we know we haven't pci_map'ed anything yet */ 1931 tot_dsds = 0; 1932 1933 /* Send marker if required */ 1934 if (vha->marker_needed != 0) { 1935 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) != 1936 QLA_SUCCESS) { 1937 spin_unlock_irqrestore(&qpair->qp_lock, flags); 1938 return QLA_FUNCTION_FAILED; 1939 } 1940 vha->marker_needed = 0; 1941 } 1942 1943 handle = qla2xxx_get_next_handle(req); 1944 if (handle == 0) 1945 goto queuing_error; 1946 1947 /* Map the sg table so we have an accurate count of sg entries needed */ 1948 if (scsi_sg_count(cmd)) { 1949 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), 1950 scsi_sg_count(cmd), cmd->sc_data_direction); 1951 if (unlikely(!nseg)) 1952 goto queuing_error; 1953 } else 1954 nseg = 0; 1955 1956 tot_dsds = nseg; 1957 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); 1958 if (req->cnt < (req_cnt + 2)) { 1959 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : 1960 RD_REG_DWORD_RELAXED(req->req_q_out); 1961 if (req->ring_index < cnt) 1962 req->cnt = cnt - req->ring_index; 1963 else 1964 req->cnt = req->length - 1965 (req->ring_index - cnt); 1966 if (req->cnt < (req_cnt + 2)) 1967 goto queuing_error; 1968 } 1969 1970 /* Build command packet. */ 1971 req->current_outstanding_cmd = handle; 1972 req->outstanding_cmds[handle] = sp; 1973 sp->handle = handle; 1974 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 1975 req->cnt -= req_cnt; 1976 1977 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; 1978 cmd_pkt->handle = MAKE_HANDLE(req->id, handle); 1979 1980 /* Zero out remaining portion of packet. */ 1981 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ 1982 clr_ptr = (uint32_t *)cmd_pkt + 2; 1983 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 1984 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 1985 1986 /* Set NPORT-ID and LUN number*/ 1987 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 1988 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 1989 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 1990 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 1991 cmd_pkt->vp_index = sp->fcport->vha->vp_idx; 1992 1993 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 1994 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 1995 1996 cmd_pkt->task = TSK_SIMPLE; 1997 1998 /* Load SCSI command packet. */ 1999 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); 2000 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); 2001 2002 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 2003 2004 /* Build IOCB segments */ 2005 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req); 2006 2007 /* Set total data segment count. */ 2008 cmd_pkt->entry_count = (uint8_t)req_cnt; 2009 wmb(); 2010 /* Adjust ring index. */ 2011 req->ring_index++; 2012 if (req->ring_index == req->length) { 2013 req->ring_index = 0; 2014 req->ring_ptr = req->ring; 2015 } else 2016 req->ring_ptr++; 2017 2018 sp->flags |= SRB_DMA_VALID; 2019 2020 /* Set chip new ring index. */ 2021 WRT_REG_DWORD(req->req_q_in, req->ring_index); 2022 2023 spin_unlock_irqrestore(&qpair->qp_lock, flags); 2024 return QLA_SUCCESS; 2025 2026 queuing_error: 2027 if (tot_dsds) 2028 scsi_dma_unmap(cmd); 2029 2030 spin_unlock_irqrestore(&qpair->qp_lock, flags); 2031 2032 return QLA_FUNCTION_FAILED; 2033 } 2034 2035 2036 /** 2037 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP 2038 * @sp: command to send to the ISP 2039 * 2040 * Returns non-zero if a failure occurred, else zero. 2041 */ 2042 int 2043 qla2xxx_dif_start_scsi_mq(srb_t *sp) 2044 { 2045 int nseg; 2046 unsigned long flags; 2047 uint32_t *clr_ptr; 2048 uint32_t handle; 2049 uint16_t cnt; 2050 uint16_t req_cnt = 0; 2051 uint16_t tot_dsds; 2052 uint16_t tot_prot_dsds; 2053 uint16_t fw_prot_opts = 0; 2054 struct req_que *req = NULL; 2055 struct rsp_que *rsp = NULL; 2056 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 2057 struct scsi_qla_host *vha = sp->fcport->vha; 2058 struct qla_hw_data *ha = vha->hw; 2059 struct cmd_type_crc_2 *cmd_pkt; 2060 uint32_t status = 0; 2061 struct qla_qpair *qpair = sp->qpair; 2062 2063 #define QDSS_GOT_Q_SPACE BIT_0 2064 2065 /* Check for host side state */ 2066 if (!qpair->online) { 2067 cmd->result = DID_NO_CONNECT << 16; 2068 return QLA_INTERFACE_ERROR; 2069 } 2070 2071 if (!qpair->difdix_supported && 2072 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { 2073 cmd->result = DID_NO_CONNECT << 16; 2074 return QLA_INTERFACE_ERROR; 2075 } 2076 2077 /* Only process protection or >16 cdb in this routine */ 2078 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) { 2079 if (cmd->cmd_len <= 16) 2080 return qla2xxx_start_scsi_mq(sp); 2081 } 2082 2083 spin_lock_irqsave(&qpair->qp_lock, flags); 2084 2085 /* Setup qpair pointers */ 2086 rsp = qpair->rsp; 2087 req = qpair->req; 2088 2089 /* So we know we haven't pci_map'ed anything yet */ 2090 tot_dsds = 0; 2091 2092 /* Send marker if required */ 2093 if (vha->marker_needed != 0) { 2094 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) != 2095 QLA_SUCCESS) { 2096 spin_unlock_irqrestore(&qpair->qp_lock, flags); 2097 return QLA_FUNCTION_FAILED; 2098 } 2099 vha->marker_needed = 0; 2100 } 2101 2102 handle = qla2xxx_get_next_handle(req); 2103 if (handle == 0) 2104 goto queuing_error; 2105 2106 /* Compute number of required data segments */ 2107 /* Map the sg table so we have an accurate count of sg entries needed */ 2108 if (scsi_sg_count(cmd)) { 2109 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), 2110 scsi_sg_count(cmd), cmd->sc_data_direction); 2111 if (unlikely(!nseg)) 2112 goto queuing_error; 2113 else 2114 sp->flags |= SRB_DMA_VALID; 2115 2116 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || 2117 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { 2118 struct qla2_sgx sgx; 2119 uint32_t partial; 2120 2121 memset(&sgx, 0, sizeof(struct qla2_sgx)); 2122 sgx.tot_bytes = scsi_bufflen(cmd); 2123 sgx.cur_sg = scsi_sglist(cmd); 2124 sgx.sp = sp; 2125 2126 nseg = 0; 2127 while (qla24xx_get_one_block_sg( 2128 cmd->device->sector_size, &sgx, &partial)) 2129 nseg++; 2130 } 2131 } else 2132 nseg = 0; 2133 2134 /* number of required data segments */ 2135 tot_dsds = nseg; 2136 2137 /* Compute number of required protection segments */ 2138 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) { 2139 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), 2140 scsi_prot_sg_count(cmd), cmd->sc_data_direction); 2141 if (unlikely(!nseg)) 2142 goto queuing_error; 2143 else 2144 sp->flags |= SRB_CRC_PROT_DMA_VALID; 2145 2146 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || 2147 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { 2148 nseg = scsi_bufflen(cmd) / cmd->device->sector_size; 2149 } 2150 } else { 2151 nseg = 0; 2152 } 2153 2154 req_cnt = 1; 2155 /* Total Data and protection sg segment(s) */ 2156 tot_prot_dsds = nseg; 2157 tot_dsds += nseg; 2158 if (req->cnt < (req_cnt + 2)) { 2159 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : 2160 RD_REG_DWORD_RELAXED(req->req_q_out); 2161 if (req->ring_index < cnt) 2162 req->cnt = cnt - req->ring_index; 2163 else 2164 req->cnt = req->length - 2165 (req->ring_index - cnt); 2166 if (req->cnt < (req_cnt + 2)) 2167 goto queuing_error; 2168 } 2169 2170 status |= QDSS_GOT_Q_SPACE; 2171 2172 /* Build header part of command packet (excluding the OPCODE). */ 2173 req->current_outstanding_cmd = handle; 2174 req->outstanding_cmds[handle] = sp; 2175 sp->handle = handle; 2176 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 2177 req->cnt -= req_cnt; 2178 2179 /* Fill-in common area */ 2180 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr; 2181 cmd_pkt->handle = MAKE_HANDLE(req->id, handle); 2182 2183 clr_ptr = (uint32_t *)cmd_pkt + 2; 2184 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 2185 2186 /* Set NPORT-ID and LUN number*/ 2187 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2188 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 2189 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 2190 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 2191 2192 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 2193 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 2194 2195 /* Total Data and protection segment(s) */ 2196 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 2197 2198 /* Build IOCB segments and adjust for data protection segments */ 2199 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *) 2200 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) != 2201 QLA_SUCCESS) 2202 goto queuing_error; 2203 2204 cmd_pkt->entry_count = (uint8_t)req_cnt; 2205 cmd_pkt->timeout = cpu_to_le16(0); 2206 wmb(); 2207 2208 /* Adjust ring index. */ 2209 req->ring_index++; 2210 if (req->ring_index == req->length) { 2211 req->ring_index = 0; 2212 req->ring_ptr = req->ring; 2213 } else 2214 req->ring_ptr++; 2215 2216 /* Set chip new ring index. */ 2217 WRT_REG_DWORD(req->req_q_in, req->ring_index); 2218 2219 /* Manage unprocessed RIO/ZIO commands in response queue. */ 2220 if (vha->flags.process_response_queue && 2221 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 2222 qla24xx_process_response_queue(vha, rsp); 2223 2224 spin_unlock_irqrestore(&qpair->qp_lock, flags); 2225 2226 return QLA_SUCCESS; 2227 2228 queuing_error: 2229 if (status & QDSS_GOT_Q_SPACE) { 2230 req->outstanding_cmds[handle] = NULL; 2231 req->cnt += req_cnt; 2232 } 2233 /* Cleanup will be performed by the caller (queuecommand) */ 2234 2235 spin_unlock_irqrestore(&qpair->qp_lock, flags); 2236 return QLA_FUNCTION_FAILED; 2237 } 2238 2239 /* Generic Control-SRB manipulation functions. */ 2240 2241 /* hardware_lock assumed to be held. */ 2242 2243 void * 2244 __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp) 2245 { 2246 scsi_qla_host_t *vha = qpair->vha; 2247 struct qla_hw_data *ha = vha->hw; 2248 struct req_que *req = qpair->req; 2249 device_reg_t *reg = ISP_QUE_REG(ha, req->id); 2250 uint32_t handle; 2251 request_t *pkt; 2252 uint16_t cnt, req_cnt; 2253 2254 pkt = NULL; 2255 req_cnt = 1; 2256 handle = 0; 2257 2258 if (sp && (sp->type != SRB_SCSI_CMD)) { 2259 /* Adjust entry-counts as needed. */ 2260 req_cnt = sp->iocbs; 2261 } 2262 2263 /* Check for room on request queue. */ 2264 if (req->cnt < req_cnt + 2) { 2265 if (qpair->use_shadow_reg) 2266 cnt = *req->out_ptr; 2267 else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || 2268 IS_QLA28XX(ha)) 2269 cnt = RD_REG_DWORD(®->isp25mq.req_q_out); 2270 else if (IS_P3P_TYPE(ha)) 2271 cnt = RD_REG_DWORD(®->isp82.req_q_out); 2272 else if (IS_FWI2_CAPABLE(ha)) 2273 cnt = RD_REG_DWORD(®->isp24.req_q_out); 2274 else if (IS_QLAFX00(ha)) 2275 cnt = RD_REG_DWORD(®->ispfx00.req_q_out); 2276 else 2277 cnt = qla2x00_debounce_register( 2278 ISP_REQ_Q_OUT(ha, ®->isp)); 2279 2280 if (req->ring_index < cnt) 2281 req->cnt = cnt - req->ring_index; 2282 else 2283 req->cnt = req->length - 2284 (req->ring_index - cnt); 2285 } 2286 if (req->cnt < req_cnt + 2) 2287 goto queuing_error; 2288 2289 if (sp) { 2290 handle = qla2xxx_get_next_handle(req); 2291 if (handle == 0) { 2292 ql_log(ql_log_warn, vha, 0x700b, 2293 "No room on outstanding cmd array.\n"); 2294 goto queuing_error; 2295 } 2296 2297 /* Prep command array. */ 2298 req->current_outstanding_cmd = handle; 2299 req->outstanding_cmds[handle] = sp; 2300 sp->handle = handle; 2301 } 2302 2303 /* Prep packet */ 2304 req->cnt -= req_cnt; 2305 pkt = req->ring_ptr; 2306 memset(pkt, 0, REQUEST_ENTRY_SIZE); 2307 if (IS_QLAFX00(ha)) { 2308 WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt); 2309 WRT_REG_WORD((void __iomem *)&pkt->handle, handle); 2310 } else { 2311 pkt->entry_count = req_cnt; 2312 pkt->handle = handle; 2313 } 2314 2315 return pkt; 2316 2317 queuing_error: 2318 qpair->tgt_counters.num_alloc_iocb_failed++; 2319 return pkt; 2320 } 2321 2322 void * 2323 qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp) 2324 { 2325 scsi_qla_host_t *vha = qpair->vha; 2326 2327 if (qla2x00_reset_active(vha)) 2328 return NULL; 2329 2330 return __qla2x00_alloc_iocbs(qpair, sp); 2331 } 2332 2333 void * 2334 qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp) 2335 { 2336 return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp); 2337 } 2338 2339 static void 2340 qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio) 2341 { 2342 struct srb_iocb *lio = &sp->u.iocb_cmd; 2343 2344 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2345 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI); 2346 if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) { 2347 logio->control_flags |= LCF_NVME_PRLI; 2348 if (sp->vha->flags.nvme_first_burst) 2349 logio->io_parameter[0] = NVME_PRLI_SP_FIRST_BURST; 2350 } 2351 2352 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2353 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 2354 logio->port_id[1] = sp->fcport->d_id.b.area; 2355 logio->port_id[2] = sp->fcport->d_id.b.domain; 2356 logio->vp_index = sp->vha->vp_idx; 2357 } 2358 2359 static void 2360 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio) 2361 { 2362 struct srb_iocb *lio = &sp->u.iocb_cmd; 2363 2364 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2365 if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) { 2366 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI); 2367 } else { 2368 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); 2369 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI) 2370 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI); 2371 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI) 2372 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI); 2373 } 2374 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2375 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 2376 logio->port_id[1] = sp->fcport->d_id.b.area; 2377 logio->port_id[2] = sp->fcport->d_id.b.domain; 2378 logio->vp_index = sp->vha->vp_idx; 2379 } 2380 2381 static void 2382 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx) 2383 { 2384 struct qla_hw_data *ha = sp->vha->hw; 2385 struct srb_iocb *lio = &sp->u.iocb_cmd; 2386 uint16_t opts; 2387 2388 mbx->entry_type = MBX_IOCB_TYPE; 2389 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); 2390 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT); 2391 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0; 2392 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0; 2393 if (HAS_EXTENDED_IDS(ha)) { 2394 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id); 2395 mbx->mb10 = cpu_to_le16(opts); 2396 } else { 2397 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts); 2398 } 2399 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); 2400 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | 2401 sp->fcport->d_id.b.al_pa); 2402 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); 2403 } 2404 2405 static void 2406 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio) 2407 { 2408 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2409 logio->control_flags = 2410 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); 2411 if (!sp->fcport->keep_nport_handle) 2412 logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT); 2413 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2414 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 2415 logio->port_id[1] = sp->fcport->d_id.b.area; 2416 logio->port_id[2] = sp->fcport->d_id.b.domain; 2417 logio->vp_index = sp->vha->vp_idx; 2418 } 2419 2420 static void 2421 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx) 2422 { 2423 struct qla_hw_data *ha = sp->vha->hw; 2424 2425 mbx->entry_type = MBX_IOCB_TYPE; 2426 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); 2427 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT); 2428 mbx->mb1 = HAS_EXTENDED_IDS(ha) ? 2429 cpu_to_le16(sp->fcport->loop_id) : 2430 cpu_to_le16(sp->fcport->loop_id << 8); 2431 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); 2432 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | 2433 sp->fcport->d_id.b.al_pa); 2434 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); 2435 /* Implicit: mbx->mbx10 = 0. */ 2436 } 2437 2438 static void 2439 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio) 2440 { 2441 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 2442 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC); 2443 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2444 logio->vp_index = sp->vha->vp_idx; 2445 } 2446 2447 static void 2448 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx) 2449 { 2450 struct qla_hw_data *ha = sp->vha->hw; 2451 2452 mbx->entry_type = MBX_IOCB_TYPE; 2453 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); 2454 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE); 2455 if (HAS_EXTENDED_IDS(ha)) { 2456 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id); 2457 mbx->mb10 = cpu_to_le16(BIT_0); 2458 } else { 2459 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0); 2460 } 2461 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma)); 2462 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma)); 2463 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma))); 2464 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma))); 2465 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx); 2466 } 2467 2468 static void 2469 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) 2470 { 2471 uint32_t flags; 2472 uint64_t lun; 2473 struct fc_port *fcport = sp->fcport; 2474 scsi_qla_host_t *vha = fcport->vha; 2475 struct qla_hw_data *ha = vha->hw; 2476 struct srb_iocb *iocb = &sp->u.iocb_cmd; 2477 struct req_que *req = vha->req; 2478 2479 flags = iocb->u.tmf.flags; 2480 lun = iocb->u.tmf.lun; 2481 2482 tsk->entry_type = TSK_MGMT_IOCB_TYPE; 2483 tsk->entry_count = 1; 2484 tsk->handle = MAKE_HANDLE(req->id, tsk->handle); 2485 tsk->nport_handle = cpu_to_le16(fcport->loop_id); 2486 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 2487 tsk->control_flags = cpu_to_le32(flags); 2488 tsk->port_id[0] = fcport->d_id.b.al_pa; 2489 tsk->port_id[1] = fcport->d_id.b.area; 2490 tsk->port_id[2] = fcport->d_id.b.domain; 2491 tsk->vp_index = fcport->vha->vp_idx; 2492 2493 if (flags == TCF_LUN_RESET) { 2494 int_to_scsilun(lun, &tsk->lun); 2495 host_to_fcp_swap((uint8_t *)&tsk->lun, 2496 sizeof(tsk->lun)); 2497 } 2498 } 2499 2500 void qla2x00_init_timer(srb_t *sp, unsigned long tmo) 2501 { 2502 timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0); 2503 sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ; 2504 sp->free = qla2x00_sp_free; 2505 if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD) 2506 init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp); 2507 sp->start_timer = 1; 2508 } 2509 2510 static void qla2x00_els_dcmd_sp_free(srb_t *sp) 2511 { 2512 struct srb_iocb *elsio = &sp->u.iocb_cmd; 2513 2514 kfree(sp->fcport); 2515 2516 if (elsio->u.els_logo.els_logo_pyld) 2517 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE, 2518 elsio->u.els_logo.els_logo_pyld, 2519 elsio->u.els_logo.els_logo_pyld_dma); 2520 2521 del_timer(&elsio->timer); 2522 qla2x00_rel_sp(sp); 2523 } 2524 2525 static void 2526 qla2x00_els_dcmd_iocb_timeout(void *data) 2527 { 2528 srb_t *sp = data; 2529 fc_port_t *fcport = sp->fcport; 2530 struct scsi_qla_host *vha = sp->vha; 2531 struct srb_iocb *lio = &sp->u.iocb_cmd; 2532 2533 ql_dbg(ql_dbg_io, vha, 0x3069, 2534 "%s Timeout, hdl=%x, portid=%02x%02x%02x\n", 2535 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, 2536 fcport->d_id.b.al_pa); 2537 2538 complete(&lio->u.els_logo.comp); 2539 } 2540 2541 static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res) 2542 { 2543 fc_port_t *fcport = sp->fcport; 2544 struct srb_iocb *lio = &sp->u.iocb_cmd; 2545 struct scsi_qla_host *vha = sp->vha; 2546 2547 ql_dbg(ql_dbg_io, vha, 0x3072, 2548 "%s hdl=%x, portid=%02x%02x%02x done\n", 2549 sp->name, sp->handle, fcport->d_id.b.domain, 2550 fcport->d_id.b.area, fcport->d_id.b.al_pa); 2551 2552 complete(&lio->u.els_logo.comp); 2553 } 2554 2555 int 2556 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, 2557 port_id_t remote_did) 2558 { 2559 srb_t *sp; 2560 fc_port_t *fcport = NULL; 2561 struct srb_iocb *elsio = NULL; 2562 struct qla_hw_data *ha = vha->hw; 2563 struct els_logo_payload logo_pyld; 2564 int rval = QLA_SUCCESS; 2565 2566 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 2567 if (!fcport) { 2568 ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n"); 2569 return -ENOMEM; 2570 } 2571 2572 /* Alloc SRB structure */ 2573 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 2574 if (!sp) { 2575 kfree(fcport); 2576 ql_log(ql_log_info, vha, 0x70e6, 2577 "SRB allocation failed\n"); 2578 return -ENOMEM; 2579 } 2580 2581 elsio = &sp->u.iocb_cmd; 2582 fcport->loop_id = 0xFFFF; 2583 fcport->d_id.b.domain = remote_did.b.domain; 2584 fcport->d_id.b.area = remote_did.b.area; 2585 fcport->d_id.b.al_pa = remote_did.b.al_pa; 2586 2587 ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n", 2588 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); 2589 2590 sp->type = SRB_ELS_DCMD; 2591 sp->name = "ELS_DCMD"; 2592 sp->fcport = fcport; 2593 elsio->timeout = qla2x00_els_dcmd_iocb_timeout; 2594 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT); 2595 init_completion(&sp->u.iocb_cmd.u.els_logo.comp); 2596 sp->done = qla2x00_els_dcmd_sp_done; 2597 sp->free = qla2x00_els_dcmd_sp_free; 2598 2599 elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev, 2600 DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma, 2601 GFP_KERNEL); 2602 2603 if (!elsio->u.els_logo.els_logo_pyld) { 2604 sp->free(sp); 2605 return QLA_FUNCTION_FAILED; 2606 } 2607 2608 memset(&logo_pyld, 0, sizeof(struct els_logo_payload)); 2609 2610 elsio->u.els_logo.els_cmd = els_opcode; 2611 logo_pyld.opcode = els_opcode; 2612 logo_pyld.s_id[0] = vha->d_id.b.al_pa; 2613 logo_pyld.s_id[1] = vha->d_id.b.area; 2614 logo_pyld.s_id[2] = vha->d_id.b.domain; 2615 host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t)); 2616 memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE); 2617 2618 memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld, 2619 sizeof(struct els_logo_payload)); 2620 2621 rval = qla2x00_start_sp(sp); 2622 if (rval != QLA_SUCCESS) { 2623 sp->free(sp); 2624 return QLA_FUNCTION_FAILED; 2625 } 2626 2627 ql_dbg(ql_dbg_io, vha, 0x3074, 2628 "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n", 2629 sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain, 2630 fcport->d_id.b.area, fcport->d_id.b.al_pa); 2631 2632 wait_for_completion(&elsio->u.els_logo.comp); 2633 2634 sp->free(sp); 2635 return rval; 2636 } 2637 2638 static void 2639 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) 2640 { 2641 scsi_qla_host_t *vha = sp->vha; 2642 struct srb_iocb *elsio = &sp->u.iocb_cmd; 2643 2644 els_iocb->entry_type = ELS_IOCB_TYPE; 2645 els_iocb->entry_count = 1; 2646 els_iocb->sys_define = 0; 2647 els_iocb->entry_status = 0; 2648 els_iocb->handle = sp->handle; 2649 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2650 els_iocb->tx_dsd_count = 1; 2651 els_iocb->vp_index = vha->vp_idx; 2652 els_iocb->sof_type = EST_SOFI3; 2653 els_iocb->rx_dsd_count = 0; 2654 els_iocb->opcode = elsio->u.els_logo.els_cmd; 2655 2656 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; 2657 els_iocb->port_id[1] = sp->fcport->d_id.b.area; 2658 els_iocb->port_id[2] = sp->fcport->d_id.b.domain; 2659 els_iocb->s_id[0] = vha->d_id.b.al_pa; 2660 els_iocb->s_id[1] = vha->d_id.b.area; 2661 els_iocb->s_id[2] = vha->d_id.b.domain; 2662 2663 if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) { 2664 els_iocb->control_flags = 0; 2665 els_iocb->tx_byte_count = els_iocb->tx_len = 2666 cpu_to_le32(sizeof(struct els_plogi_payload)); 2667 put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma, 2668 &els_iocb->tx_address); 2669 els_iocb->rx_dsd_count = 1; 2670 els_iocb->rx_byte_count = els_iocb->rx_len = 2671 cpu_to_le32(sizeof(struct els_plogi_payload)); 2672 put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma, 2673 &els_iocb->rx_address); 2674 2675 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073, 2676 "PLOGI ELS IOCB:\n"); 2677 ql_dump_buffer(ql_log_info, vha, 0x0109, 2678 (uint8_t *)els_iocb, 0x70); 2679 } else { 2680 els_iocb->control_flags = 1 << 13; 2681 els_iocb->tx_byte_count = 2682 cpu_to_le32(sizeof(struct els_logo_payload)); 2683 put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma, 2684 &els_iocb->tx_address); 2685 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload)); 2686 2687 els_iocb->rx_byte_count = 0; 2688 els_iocb->rx_address = 0; 2689 els_iocb->rx_len = 0; 2690 } 2691 2692 sp->vha->qla_stats.control_requests++; 2693 } 2694 2695 static void 2696 qla2x00_els_dcmd2_iocb_timeout(void *data) 2697 { 2698 srb_t *sp = data; 2699 fc_port_t *fcport = sp->fcport; 2700 struct scsi_qla_host *vha = sp->vha; 2701 struct qla_hw_data *ha = vha->hw; 2702 unsigned long flags = 0; 2703 int res; 2704 2705 ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069, 2706 "%s hdl=%x ELS Timeout, %8phC portid=%06x\n", 2707 sp->name, sp->handle, fcport->port_name, fcport->d_id.b24); 2708 2709 /* Abort the exchange */ 2710 spin_lock_irqsave(&ha->hardware_lock, flags); 2711 res = ha->isp_ops->abort_command(sp); 2712 ql_dbg(ql_dbg_io, vha, 0x3070, 2713 "mbx abort_command %s\n", 2714 (res == QLA_SUCCESS) ? "successful" : "failed"); 2715 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2716 2717 sp->done(sp, QLA_FUNCTION_TIMEOUT); 2718 } 2719 2720 void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi) 2721 { 2722 if (els_plogi->els_plogi_pyld) 2723 dma_free_coherent(&vha->hw->pdev->dev, 2724 els_plogi->tx_size, 2725 els_plogi->els_plogi_pyld, 2726 els_plogi->els_plogi_pyld_dma); 2727 2728 if (els_plogi->els_resp_pyld) 2729 dma_free_coherent(&vha->hw->pdev->dev, 2730 els_plogi->rx_size, 2731 els_plogi->els_resp_pyld, 2732 els_plogi->els_resp_pyld_dma); 2733 } 2734 2735 static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) 2736 { 2737 fc_port_t *fcport = sp->fcport; 2738 struct srb_iocb *lio = &sp->u.iocb_cmd; 2739 struct scsi_qla_host *vha = sp->vha; 2740 struct event_arg ea; 2741 struct qla_work_evt *e; 2742 2743 ql_dbg(ql_dbg_disc, vha, 0x3072, 2744 "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n", 2745 sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name); 2746 2747 fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE); 2748 del_timer(&sp->u.iocb_cmd.timer); 2749 2750 if (sp->flags & SRB_WAKEUP_ON_COMP) 2751 complete(&lio->u.els_plogi.comp); 2752 else { 2753 if (res) { 2754 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 2755 } else { 2756 memset(&ea, 0, sizeof(ea)); 2757 ea.fcport = fcport; 2758 ea.data[0] = MBS_COMMAND_COMPLETE; 2759 ea.sp = sp; 2760 qla24xx_handle_plogi_done_event(vha, &ea); 2761 } 2762 2763 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP); 2764 if (!e) { 2765 struct srb_iocb *elsio = &sp->u.iocb_cmd; 2766 2767 qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi); 2768 sp->free(sp); 2769 return; 2770 } 2771 e->u.iosb.sp = sp; 2772 qla2x00_post_work(vha, e); 2773 } 2774 } 2775 2776 int 2777 qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, 2778 fc_port_t *fcport, bool wait) 2779 { 2780 srb_t *sp; 2781 struct srb_iocb *elsio = NULL; 2782 struct qla_hw_data *ha = vha->hw; 2783 int rval = QLA_SUCCESS; 2784 void *ptr, *resp_ptr; 2785 2786 /* Alloc SRB structure */ 2787 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 2788 if (!sp) { 2789 ql_log(ql_log_info, vha, 0x70e6, 2790 "SRB allocation failed\n"); 2791 return -ENOMEM; 2792 } 2793 2794 elsio = &sp->u.iocb_cmd; 2795 ql_dbg(ql_dbg_io, vha, 0x3073, 2796 "Enter: PLOGI portid=%06x\n", fcport->d_id.b24); 2797 2798 fcport->flags |= FCF_ASYNC_SENT; 2799 sp->type = SRB_ELS_DCMD; 2800 sp->name = "ELS_DCMD"; 2801 sp->fcport = fcport; 2802 2803 elsio->timeout = qla2x00_els_dcmd2_iocb_timeout; 2804 init_completion(&elsio->u.els_plogi.comp); 2805 if (wait) 2806 sp->flags = SRB_WAKEUP_ON_COMP; 2807 2808 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2); 2809 2810 sp->done = qla2x00_els_dcmd2_sp_done; 2811 elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE; 2812 2813 ptr = elsio->u.els_plogi.els_plogi_pyld = 2814 dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE, 2815 &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL); 2816 2817 if (!elsio->u.els_plogi.els_plogi_pyld) { 2818 rval = QLA_FUNCTION_FAILED; 2819 goto out; 2820 } 2821 2822 resp_ptr = elsio->u.els_plogi.els_resp_pyld = 2823 dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE, 2824 &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL); 2825 2826 if (!elsio->u.els_plogi.els_resp_pyld) { 2827 rval = QLA_FUNCTION_FAILED; 2828 goto out; 2829 } 2830 2831 ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr); 2832 2833 memset(ptr, 0, sizeof(struct els_plogi_payload)); 2834 memset(resp_ptr, 0, sizeof(struct els_plogi_payload)); 2835 memcpy(elsio->u.els_plogi.els_plogi_pyld->data, 2836 &ha->plogi_els_payld.data, 2837 sizeof(elsio->u.els_plogi.els_plogi_pyld->data)); 2838 2839 elsio->u.els_plogi.els_cmd = els_opcode; 2840 elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode; 2841 2842 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n"); 2843 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109, 2844 (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70); 2845 2846 rval = qla2x00_start_sp(sp); 2847 if (rval != QLA_SUCCESS) { 2848 rval = QLA_FUNCTION_FAILED; 2849 } else { 2850 ql_dbg(ql_dbg_disc, vha, 0x3074, 2851 "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n", 2852 sp->name, sp->handle, fcport->loop_id, 2853 fcport->d_id.b24, vha->d_id.b24); 2854 } 2855 2856 if (wait) { 2857 wait_for_completion(&elsio->u.els_plogi.comp); 2858 2859 if (elsio->u.els_plogi.comp_status != CS_COMPLETE) 2860 rval = QLA_FUNCTION_FAILED; 2861 } else { 2862 goto done; 2863 } 2864 2865 out: 2866 fcport->flags &= ~(FCF_ASYNC_SENT); 2867 qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi); 2868 sp->free(sp); 2869 done: 2870 return rval; 2871 } 2872 2873 static void 2874 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) 2875 { 2876 struct bsg_job *bsg_job = sp->u.bsg_job; 2877 struct fc_bsg_request *bsg_request = bsg_job->request; 2878 2879 els_iocb->entry_type = ELS_IOCB_TYPE; 2880 els_iocb->entry_count = 1; 2881 els_iocb->sys_define = 0; 2882 els_iocb->entry_status = 0; 2883 els_iocb->handle = sp->handle; 2884 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2885 els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt); 2886 els_iocb->vp_index = sp->vha->vp_idx; 2887 els_iocb->sof_type = EST_SOFI3; 2888 els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt); 2889 2890 els_iocb->opcode = 2891 sp->type == SRB_ELS_CMD_RPT ? 2892 bsg_request->rqst_data.r_els.els_code : 2893 bsg_request->rqst_data.h_els.command_code; 2894 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; 2895 els_iocb->port_id[1] = sp->fcport->d_id.b.area; 2896 els_iocb->port_id[2] = sp->fcport->d_id.b.domain; 2897 els_iocb->control_flags = 0; 2898 els_iocb->rx_byte_count = 2899 cpu_to_le32(bsg_job->reply_payload.payload_len); 2900 els_iocb->tx_byte_count = 2901 cpu_to_le32(bsg_job->request_payload.payload_len); 2902 2903 put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list), 2904 &els_iocb->tx_address); 2905 els_iocb->tx_len = cpu_to_le32(sg_dma_len 2906 (bsg_job->request_payload.sg_list)); 2907 2908 put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list), 2909 &els_iocb->rx_address); 2910 els_iocb->rx_len = cpu_to_le32(sg_dma_len 2911 (bsg_job->reply_payload.sg_list)); 2912 2913 sp->vha->qla_stats.control_requests++; 2914 } 2915 2916 static void 2917 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb) 2918 { 2919 uint16_t avail_dsds; 2920 struct dsd64 *cur_dsd; 2921 struct scatterlist *sg; 2922 int index; 2923 uint16_t tot_dsds; 2924 scsi_qla_host_t *vha = sp->vha; 2925 struct qla_hw_data *ha = vha->hw; 2926 struct bsg_job *bsg_job = sp->u.bsg_job; 2927 int entry_count = 1; 2928 2929 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t)); 2930 ct_iocb->entry_type = CT_IOCB_TYPE; 2931 ct_iocb->entry_status = 0; 2932 ct_iocb->handle1 = sp->handle; 2933 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id); 2934 ct_iocb->status = cpu_to_le16(0); 2935 ct_iocb->control_flags = cpu_to_le16(0); 2936 ct_iocb->timeout = 0; 2937 ct_iocb->cmd_dsd_count = 2938 cpu_to_le16(bsg_job->request_payload.sg_cnt); 2939 ct_iocb->total_dsd_count = 2940 cpu_to_le16(bsg_job->request_payload.sg_cnt + 1); 2941 ct_iocb->req_bytecount = 2942 cpu_to_le32(bsg_job->request_payload.payload_len); 2943 ct_iocb->rsp_bytecount = 2944 cpu_to_le32(bsg_job->reply_payload.payload_len); 2945 2946 put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list), 2947 &ct_iocb->req_dsd.address); 2948 ct_iocb->req_dsd.length = ct_iocb->req_bytecount; 2949 2950 put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list), 2951 &ct_iocb->rsp_dsd.address); 2952 ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount; 2953 2954 avail_dsds = 1; 2955 cur_dsd = &ct_iocb->rsp_dsd; 2956 index = 0; 2957 tot_dsds = bsg_job->reply_payload.sg_cnt; 2958 2959 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) { 2960 cont_a64_entry_t *cont_pkt; 2961 2962 /* Allocate additional continuation packets? */ 2963 if (avail_dsds == 0) { 2964 /* 2965 * Five DSDs are available in the Cont. 2966 * Type 1 IOCB. 2967 */ 2968 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, 2969 vha->hw->req_q_map[0]); 2970 cur_dsd = cont_pkt->dsd; 2971 avail_dsds = 5; 2972 entry_count++; 2973 } 2974 2975 append_dsd64(&cur_dsd, sg); 2976 avail_dsds--; 2977 } 2978 ct_iocb->entry_count = entry_count; 2979 2980 sp->vha->qla_stats.control_requests++; 2981 } 2982 2983 static void 2984 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) 2985 { 2986 uint16_t avail_dsds; 2987 struct dsd64 *cur_dsd; 2988 struct scatterlist *sg; 2989 int index; 2990 uint16_t cmd_dsds, rsp_dsds; 2991 scsi_qla_host_t *vha = sp->vha; 2992 struct qla_hw_data *ha = vha->hw; 2993 struct bsg_job *bsg_job = sp->u.bsg_job; 2994 int entry_count = 1; 2995 cont_a64_entry_t *cont_pkt = NULL; 2996 2997 ct_iocb->entry_type = CT_IOCB_TYPE; 2998 ct_iocb->entry_status = 0; 2999 ct_iocb->sys_define = 0; 3000 ct_iocb->handle = sp->handle; 3001 3002 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3003 ct_iocb->vp_index = sp->vha->vp_idx; 3004 ct_iocb->comp_status = cpu_to_le16(0); 3005 3006 cmd_dsds = bsg_job->request_payload.sg_cnt; 3007 rsp_dsds = bsg_job->reply_payload.sg_cnt; 3008 3009 ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds); 3010 ct_iocb->timeout = 0; 3011 ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds); 3012 ct_iocb->cmd_byte_count = 3013 cpu_to_le32(bsg_job->request_payload.payload_len); 3014 3015 avail_dsds = 2; 3016 cur_dsd = ct_iocb->dsd; 3017 index = 0; 3018 3019 for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) { 3020 /* Allocate additional continuation packets? */ 3021 if (avail_dsds == 0) { 3022 /* 3023 * Five DSDs are available in the Cont. 3024 * Type 1 IOCB. 3025 */ 3026 cont_pkt = qla2x00_prep_cont_type1_iocb( 3027 vha, ha->req_q_map[0]); 3028 cur_dsd = cont_pkt->dsd; 3029 avail_dsds = 5; 3030 entry_count++; 3031 } 3032 3033 append_dsd64(&cur_dsd, sg); 3034 avail_dsds--; 3035 } 3036 3037 index = 0; 3038 3039 for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) { 3040 /* Allocate additional continuation packets? */ 3041 if (avail_dsds == 0) { 3042 /* 3043 * Five DSDs are available in the Cont. 3044 * Type 1 IOCB. 3045 */ 3046 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, 3047 ha->req_q_map[0]); 3048 cur_dsd = cont_pkt->dsd; 3049 avail_dsds = 5; 3050 entry_count++; 3051 } 3052 3053 append_dsd64(&cur_dsd, sg); 3054 avail_dsds--; 3055 } 3056 ct_iocb->entry_count = entry_count; 3057 } 3058 3059 /* 3060 * qla82xx_start_scsi() - Send a SCSI command to the ISP 3061 * @sp: command to send to the ISP 3062 * 3063 * Returns non-zero if a failure occurred, else zero. 3064 */ 3065 int 3066 qla82xx_start_scsi(srb_t *sp) 3067 { 3068 int nseg; 3069 unsigned long flags; 3070 struct scsi_cmnd *cmd; 3071 uint32_t *clr_ptr; 3072 uint32_t handle; 3073 uint16_t cnt; 3074 uint16_t req_cnt; 3075 uint16_t tot_dsds; 3076 struct device_reg_82xx __iomem *reg; 3077 uint32_t dbval; 3078 uint32_t *fcp_dl; 3079 uint8_t additional_cdb_len; 3080 struct ct6_dsd *ctx; 3081 struct scsi_qla_host *vha = sp->vha; 3082 struct qla_hw_data *ha = vha->hw; 3083 struct req_que *req = NULL; 3084 struct rsp_que *rsp = NULL; 3085 3086 /* Setup device pointers. */ 3087 reg = &ha->iobase->isp82; 3088 cmd = GET_CMD_SP(sp); 3089 req = vha->req; 3090 rsp = ha->rsp_q_map[0]; 3091 3092 /* So we know we haven't pci_map'ed anything yet */ 3093 tot_dsds = 0; 3094 3095 dbval = 0x04 | (ha->portnum << 5); 3096 3097 /* Send marker if required */ 3098 if (vha->marker_needed != 0) { 3099 if (qla2x00_marker(vha, ha->base_qpair, 3100 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) { 3101 ql_log(ql_log_warn, vha, 0x300c, 3102 "qla2x00_marker failed for cmd=%p.\n", cmd); 3103 return QLA_FUNCTION_FAILED; 3104 } 3105 vha->marker_needed = 0; 3106 } 3107 3108 /* Acquire ring specific lock */ 3109 spin_lock_irqsave(&ha->hardware_lock, flags); 3110 3111 handle = qla2xxx_get_next_handle(req); 3112 if (handle == 0) 3113 goto queuing_error; 3114 3115 /* Map the sg table so we have an accurate count of sg entries needed */ 3116 if (scsi_sg_count(cmd)) { 3117 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), 3118 scsi_sg_count(cmd), cmd->sc_data_direction); 3119 if (unlikely(!nseg)) 3120 goto queuing_error; 3121 } else 3122 nseg = 0; 3123 3124 tot_dsds = nseg; 3125 3126 if (tot_dsds > ql2xshiftctondsd) { 3127 struct cmd_type_6 *cmd_pkt; 3128 uint16_t more_dsd_lists = 0; 3129 struct dsd_dma *dsd_ptr; 3130 uint16_t i; 3131 3132 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds); 3133 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) { 3134 ql_dbg(ql_dbg_io, vha, 0x300d, 3135 "Num of DSD list %d is than %d for cmd=%p.\n", 3136 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN, 3137 cmd); 3138 goto queuing_error; 3139 } 3140 3141 if (more_dsd_lists <= ha->gbl_dsd_avail) 3142 goto sufficient_dsds; 3143 else 3144 more_dsd_lists -= ha->gbl_dsd_avail; 3145 3146 for (i = 0; i < more_dsd_lists; i++) { 3147 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); 3148 if (!dsd_ptr) { 3149 ql_log(ql_log_fatal, vha, 0x300e, 3150 "Failed to allocate memory for dsd_dma " 3151 "for cmd=%p.\n", cmd); 3152 goto queuing_error; 3153 } 3154 3155 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool, 3156 GFP_ATOMIC, &dsd_ptr->dsd_list_dma); 3157 if (!dsd_ptr->dsd_addr) { 3158 kfree(dsd_ptr); 3159 ql_log(ql_log_fatal, vha, 0x300f, 3160 "Failed to allocate memory for dsd_addr " 3161 "for cmd=%p.\n", cmd); 3162 goto queuing_error; 3163 } 3164 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list); 3165 ha->gbl_dsd_avail++; 3166 } 3167 3168 sufficient_dsds: 3169 req_cnt = 1; 3170 3171 if (req->cnt < (req_cnt + 2)) { 3172 cnt = (uint16_t)RD_REG_DWORD_RELAXED( 3173 ®->req_q_out[0]); 3174 if (req->ring_index < cnt) 3175 req->cnt = cnt - req->ring_index; 3176 else 3177 req->cnt = req->length - 3178 (req->ring_index - cnt); 3179 if (req->cnt < (req_cnt + 2)) 3180 goto queuing_error; 3181 } 3182 3183 ctx = sp->u.scmd.ct6_ctx = 3184 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); 3185 if (!ctx) { 3186 ql_log(ql_log_fatal, vha, 0x3010, 3187 "Failed to allocate ctx for cmd=%p.\n", cmd); 3188 goto queuing_error; 3189 } 3190 3191 memset(ctx, 0, sizeof(struct ct6_dsd)); 3192 ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool, 3193 GFP_ATOMIC, &ctx->fcp_cmnd_dma); 3194 if (!ctx->fcp_cmnd) { 3195 ql_log(ql_log_fatal, vha, 0x3011, 3196 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd); 3197 goto queuing_error; 3198 } 3199 3200 /* Initialize the DSD list and dma handle */ 3201 INIT_LIST_HEAD(&ctx->dsd_list); 3202 ctx->dsd_use_cnt = 0; 3203 3204 if (cmd->cmd_len > 16) { 3205 additional_cdb_len = cmd->cmd_len - 16; 3206 if ((cmd->cmd_len % 4) != 0) { 3207 /* SCSI command bigger than 16 bytes must be 3208 * multiple of 4 3209 */ 3210 ql_log(ql_log_warn, vha, 0x3012, 3211 "scsi cmd len %d not multiple of 4 " 3212 "for cmd=%p.\n", cmd->cmd_len, cmd); 3213 goto queuing_error_fcp_cmnd; 3214 } 3215 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4; 3216 } else { 3217 additional_cdb_len = 0; 3218 ctx->fcp_cmnd_len = 12 + 16 + 4; 3219 } 3220 3221 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr; 3222 cmd_pkt->handle = MAKE_HANDLE(req->id, handle); 3223 3224 /* Zero out remaining portion of packet. */ 3225 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ 3226 clr_ptr = (uint32_t *)cmd_pkt + 2; 3227 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 3228 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 3229 3230 /* Set NPORT-ID and LUN number*/ 3231 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3232 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 3233 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 3234 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 3235 cmd_pkt->vp_index = sp->vha->vp_idx; 3236 3237 /* Build IOCB segments */ 3238 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds)) 3239 goto queuing_error_fcp_cmnd; 3240 3241 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 3242 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 3243 3244 /* build FCP_CMND IU */ 3245 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun); 3246 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; 3247 3248 if (cmd->sc_data_direction == DMA_TO_DEVICE) 3249 ctx->fcp_cmnd->additional_cdb_len |= 1; 3250 else if (cmd->sc_data_direction == DMA_FROM_DEVICE) 3251 ctx->fcp_cmnd->additional_cdb_len |= 2; 3252 3253 /* Populate the FCP_PRIO. */ 3254 if (ha->flags.fcp_prio_enabled) 3255 ctx->fcp_cmnd->task_attribute |= 3256 sp->fcport->fcp_prio << 3; 3257 3258 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); 3259 3260 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 + 3261 additional_cdb_len); 3262 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd)); 3263 3264 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len); 3265 put_unaligned_le64(ctx->fcp_cmnd_dma, 3266 &cmd_pkt->fcp_cmnd_dseg_address); 3267 3268 sp->flags |= SRB_FCP_CMND_DMA_VALID; 3269 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 3270 /* Set total data segment count. */ 3271 cmd_pkt->entry_count = (uint8_t)req_cnt; 3272 /* Specify response queue number where 3273 * completion should happen 3274 */ 3275 cmd_pkt->entry_status = (uint8_t) rsp->id; 3276 } else { 3277 struct cmd_type_7 *cmd_pkt; 3278 3279 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); 3280 if (req->cnt < (req_cnt + 2)) { 3281 cnt = (uint16_t)RD_REG_DWORD_RELAXED( 3282 ®->req_q_out[0]); 3283 if (req->ring_index < cnt) 3284 req->cnt = cnt - req->ring_index; 3285 else 3286 req->cnt = req->length - 3287 (req->ring_index - cnt); 3288 } 3289 if (req->cnt < (req_cnt + 2)) 3290 goto queuing_error; 3291 3292 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; 3293 cmd_pkt->handle = MAKE_HANDLE(req->id, handle); 3294 3295 /* Zero out remaining portion of packet. */ 3296 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/ 3297 clr_ptr = (uint32_t *)cmd_pkt + 2; 3298 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 3299 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); 3300 3301 /* Set NPORT-ID and LUN number*/ 3302 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3303 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 3304 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 3305 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 3306 cmd_pkt->vp_index = sp->vha->vp_idx; 3307 3308 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 3309 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, 3310 sizeof(cmd_pkt->lun)); 3311 3312 /* Populate the FCP_PRIO. */ 3313 if (ha->flags.fcp_prio_enabled) 3314 cmd_pkt->task |= sp->fcport->fcp_prio << 3; 3315 3316 /* Load SCSI command packet. */ 3317 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); 3318 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); 3319 3320 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); 3321 3322 /* Build IOCB segments */ 3323 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req); 3324 3325 /* Set total data segment count. */ 3326 cmd_pkt->entry_count = (uint8_t)req_cnt; 3327 /* Specify response queue number where 3328 * completion should happen. 3329 */ 3330 cmd_pkt->entry_status = (uint8_t) rsp->id; 3331 3332 } 3333 /* Build command packet. */ 3334 req->current_outstanding_cmd = handle; 3335 req->outstanding_cmds[handle] = sp; 3336 sp->handle = handle; 3337 cmd->host_scribble = (unsigned char *)(unsigned long)handle; 3338 req->cnt -= req_cnt; 3339 wmb(); 3340 3341 /* Adjust ring index. */ 3342 req->ring_index++; 3343 if (req->ring_index == req->length) { 3344 req->ring_index = 0; 3345 req->ring_ptr = req->ring; 3346 } else 3347 req->ring_ptr++; 3348 3349 sp->flags |= SRB_DMA_VALID; 3350 3351 /* Set chip new ring index. */ 3352 /* write, read and verify logic */ 3353 dbval = dbval | (req->id << 8) | (req->ring_index << 16); 3354 if (ql2xdbwr) 3355 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval); 3356 else { 3357 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval); 3358 wmb(); 3359 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) { 3360 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval); 3361 wmb(); 3362 } 3363 } 3364 3365 /* Manage unprocessed RIO/ZIO commands in response queue. */ 3366 if (vha->flags.process_response_queue && 3367 rsp->ring_ptr->signature != RESPONSE_PROCESSED) 3368 qla24xx_process_response_queue(vha, rsp); 3369 3370 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3371 return QLA_SUCCESS; 3372 3373 queuing_error_fcp_cmnd: 3374 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma); 3375 queuing_error: 3376 if (tot_dsds) 3377 scsi_dma_unmap(cmd); 3378 3379 if (sp->u.scmd.crc_ctx) { 3380 mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool); 3381 sp->u.scmd.crc_ctx = NULL; 3382 } 3383 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3384 3385 return QLA_FUNCTION_FAILED; 3386 } 3387 3388 static void 3389 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb) 3390 { 3391 struct srb_iocb *aio = &sp->u.iocb_cmd; 3392 scsi_qla_host_t *vha = sp->vha; 3393 struct req_que *req = sp->qpair->req; 3394 3395 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx)); 3396 abt_iocb->entry_type = ABORT_IOCB_TYPE; 3397 abt_iocb->entry_count = 1; 3398 abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle)); 3399 if (sp->fcport) { 3400 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3401 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; 3402 abt_iocb->port_id[1] = sp->fcport->d_id.b.area; 3403 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain; 3404 } 3405 abt_iocb->handle_to_abort = 3406 cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no, 3407 aio->u.abt.cmd_hndl)); 3408 abt_iocb->vp_index = vha->vp_idx; 3409 abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no); 3410 /* Send the command to the firmware */ 3411 wmb(); 3412 } 3413 3414 static void 3415 qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx) 3416 { 3417 int i, sz; 3418 3419 mbx->entry_type = MBX_IOCB_TYPE; 3420 mbx->handle = sp->handle; 3421 sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb)); 3422 3423 for (i = 0; i < sz; i++) 3424 mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]); 3425 } 3426 3427 static void 3428 qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt) 3429 { 3430 sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt; 3431 qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg); 3432 ct_pkt->handle = sp->handle; 3433 } 3434 3435 static void qla2x00_send_notify_ack_iocb(srb_t *sp, 3436 struct nack_to_isp *nack) 3437 { 3438 struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy; 3439 3440 nack->entry_type = NOTIFY_ACK_TYPE; 3441 nack->entry_count = 1; 3442 nack->ox_id = ntfy->ox_id; 3443 3444 nack->u.isp24.handle = sp->handle; 3445 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; 3446 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { 3447 nack->u.isp24.flags = ntfy->u.isp24.flags & 3448 cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB); 3449 } 3450 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; 3451 nack->u.isp24.status = ntfy->u.isp24.status; 3452 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; 3453 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; 3454 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; 3455 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; 3456 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; 3457 nack->u.isp24.srr_flags = 0; 3458 nack->u.isp24.srr_reject_code = 0; 3459 nack->u.isp24.srr_reject_code_expl = 0; 3460 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; 3461 } 3462 3463 /* 3464 * Build NVME LS request 3465 */ 3466 static int 3467 qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt) 3468 { 3469 struct srb_iocb *nvme; 3470 int rval = QLA_SUCCESS; 3471 3472 nvme = &sp->u.iocb_cmd; 3473 cmd_pkt->entry_type = PT_LS4_REQUEST; 3474 cmd_pkt->entry_count = 1; 3475 cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT; 3476 3477 cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec); 3478 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3479 cmd_pkt->vp_index = sp->fcport->vha->vp_idx; 3480 3481 cmd_pkt->tx_dseg_count = 1; 3482 cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len; 3483 cmd_pkt->dsd[0].length = nvme->u.nvme.cmd_len; 3484 put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address); 3485 3486 cmd_pkt->rx_dseg_count = 1; 3487 cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len; 3488 cmd_pkt->dsd[1].length = nvme->u.nvme.rsp_len; 3489 put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address); 3490 3491 return rval; 3492 } 3493 3494 static void 3495 qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce) 3496 { 3497 int map, pos; 3498 3499 vce->entry_type = VP_CTRL_IOCB_TYPE; 3500 vce->handle = sp->handle; 3501 vce->entry_count = 1; 3502 vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd); 3503 vce->vp_count = cpu_to_le16(1); 3504 3505 /* 3506 * index map in firmware starts with 1; decrement index 3507 * this is ok as we never use index 0 3508 */ 3509 map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8; 3510 pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7; 3511 vce->vp_idx_map[map] |= 1 << pos; 3512 } 3513 3514 static void 3515 qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio) 3516 { 3517 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 3518 logio->control_flags = 3519 cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO); 3520 3521 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 3522 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 3523 logio->port_id[1] = sp->fcport->d_id.b.area; 3524 logio->port_id[2] = sp->fcport->d_id.b.domain; 3525 logio->vp_index = sp->fcport->vha->vp_idx; 3526 } 3527 3528 int 3529 qla2x00_start_sp(srb_t *sp) 3530 { 3531 int rval = QLA_SUCCESS; 3532 scsi_qla_host_t *vha = sp->vha; 3533 struct qla_hw_data *ha = vha->hw; 3534 struct qla_qpair *qp = sp->qpair; 3535 void *pkt; 3536 unsigned long flags; 3537 3538 spin_lock_irqsave(qp->qp_lock_ptr, flags); 3539 pkt = __qla2x00_alloc_iocbs(sp->qpair, sp); 3540 if (!pkt) { 3541 rval = EAGAIN; 3542 ql_log(ql_log_warn, vha, 0x700c, 3543 "qla2x00_alloc_iocbs failed.\n"); 3544 goto done; 3545 } 3546 3547 switch (sp->type) { 3548 case SRB_LOGIN_CMD: 3549 IS_FWI2_CAPABLE(ha) ? 3550 qla24xx_login_iocb(sp, pkt) : 3551 qla2x00_login_iocb(sp, pkt); 3552 break; 3553 case SRB_PRLI_CMD: 3554 qla24xx_prli_iocb(sp, pkt); 3555 break; 3556 case SRB_LOGOUT_CMD: 3557 IS_FWI2_CAPABLE(ha) ? 3558 qla24xx_logout_iocb(sp, pkt) : 3559 qla2x00_logout_iocb(sp, pkt); 3560 break; 3561 case SRB_ELS_CMD_RPT: 3562 case SRB_ELS_CMD_HST: 3563 qla24xx_els_iocb(sp, pkt); 3564 break; 3565 case SRB_CT_CMD: 3566 IS_FWI2_CAPABLE(ha) ? 3567 qla24xx_ct_iocb(sp, pkt) : 3568 qla2x00_ct_iocb(sp, pkt); 3569 break; 3570 case SRB_ADISC_CMD: 3571 IS_FWI2_CAPABLE(ha) ? 3572 qla24xx_adisc_iocb(sp, pkt) : 3573 qla2x00_adisc_iocb(sp, pkt); 3574 break; 3575 case SRB_TM_CMD: 3576 IS_QLAFX00(ha) ? 3577 qlafx00_tm_iocb(sp, pkt) : 3578 qla24xx_tm_iocb(sp, pkt); 3579 break; 3580 case SRB_FXIOCB_DCMD: 3581 case SRB_FXIOCB_BCMD: 3582 qlafx00_fxdisc_iocb(sp, pkt); 3583 break; 3584 case SRB_NVME_LS: 3585 qla_nvme_ls(sp, pkt); 3586 break; 3587 case SRB_ABT_CMD: 3588 IS_QLAFX00(ha) ? 3589 qlafx00_abort_iocb(sp, pkt) : 3590 qla24xx_abort_iocb(sp, pkt); 3591 break; 3592 case SRB_ELS_DCMD: 3593 qla24xx_els_logo_iocb(sp, pkt); 3594 break; 3595 case SRB_CT_PTHRU_CMD: 3596 qla2x00_ctpthru_cmd_iocb(sp, pkt); 3597 break; 3598 case SRB_MB_IOCB: 3599 qla2x00_mb_iocb(sp, pkt); 3600 break; 3601 case SRB_NACK_PLOGI: 3602 case SRB_NACK_PRLI: 3603 case SRB_NACK_LOGO: 3604 qla2x00_send_notify_ack_iocb(sp, pkt); 3605 break; 3606 case SRB_CTRL_VP: 3607 qla25xx_ctrlvp_iocb(sp, pkt); 3608 break; 3609 case SRB_PRLO_CMD: 3610 qla24xx_prlo_iocb(sp, pkt); 3611 break; 3612 default: 3613 break; 3614 } 3615 3616 if (sp->start_timer) 3617 add_timer(&sp->u.iocb_cmd.timer); 3618 3619 wmb(); 3620 qla2x00_start_iocbs(vha, qp->req); 3621 done: 3622 spin_unlock_irqrestore(qp->qp_lock_ptr, flags); 3623 return rval; 3624 } 3625 3626 static void 3627 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha, 3628 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds) 3629 { 3630 uint16_t avail_dsds; 3631 struct dsd64 *cur_dsd; 3632 uint32_t req_data_len = 0; 3633 uint32_t rsp_data_len = 0; 3634 struct scatterlist *sg; 3635 int index; 3636 int entry_count = 1; 3637 struct bsg_job *bsg_job = sp->u.bsg_job; 3638 3639 /*Update entry type to indicate bidir command */ 3640 put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type); 3641 3642 /* Set the transfer direction, in this set both flags 3643 * Also set the BD_WRAP_BACK flag, firmware will take care 3644 * assigning DID=SID for outgoing pkts. 3645 */ 3646 cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt); 3647 cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt); 3648 cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA | 3649 BD_WRAP_BACK); 3650 3651 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len; 3652 cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len); 3653 cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len); 3654 cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2); 3655 3656 vha->bidi_stats.transfer_bytes += req_data_len; 3657 vha->bidi_stats.io_count++; 3658 3659 vha->qla_stats.output_bytes += req_data_len; 3660 vha->qla_stats.output_requests++; 3661 3662 /* Only one dsd is available for bidirectional IOCB, remaining dsds 3663 * are bundled in continuation iocb 3664 */ 3665 avail_dsds = 1; 3666 cur_dsd = &cmd_pkt->fcp_dsd; 3667 3668 index = 0; 3669 3670 for_each_sg(bsg_job->request_payload.sg_list, sg, 3671 bsg_job->request_payload.sg_cnt, index) { 3672 cont_a64_entry_t *cont_pkt; 3673 3674 /* Allocate additional continuation packets */ 3675 if (avail_dsds == 0) { 3676 /* Continuation type 1 IOCB can accomodate 3677 * 5 DSDS 3678 */ 3679 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); 3680 cur_dsd = cont_pkt->dsd; 3681 avail_dsds = 5; 3682 entry_count++; 3683 } 3684 append_dsd64(&cur_dsd, sg); 3685 avail_dsds--; 3686 } 3687 /* For read request DSD will always goes to continuation IOCB 3688 * and follow the write DSD. If there is room on the current IOCB 3689 * then it is added to that IOCB else new continuation IOCB is 3690 * allocated. 3691 */ 3692 for_each_sg(bsg_job->reply_payload.sg_list, sg, 3693 bsg_job->reply_payload.sg_cnt, index) { 3694 cont_a64_entry_t *cont_pkt; 3695 3696 /* Allocate additional continuation packets */ 3697 if (avail_dsds == 0) { 3698 /* Continuation type 1 IOCB can accomodate 3699 * 5 DSDS 3700 */ 3701 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); 3702 cur_dsd = cont_pkt->dsd; 3703 avail_dsds = 5; 3704 entry_count++; 3705 } 3706 append_dsd64(&cur_dsd, sg); 3707 avail_dsds--; 3708 } 3709 /* This value should be same as number of IOCB required for this cmd */ 3710 cmd_pkt->entry_count = entry_count; 3711 } 3712 3713 int 3714 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds) 3715 { 3716 3717 struct qla_hw_data *ha = vha->hw; 3718 unsigned long flags; 3719 uint32_t handle; 3720 uint16_t req_cnt; 3721 uint16_t cnt; 3722 uint32_t *clr_ptr; 3723 struct cmd_bidir *cmd_pkt = NULL; 3724 struct rsp_que *rsp; 3725 struct req_que *req; 3726 int rval = EXT_STATUS_OK; 3727 3728 rval = QLA_SUCCESS; 3729 3730 rsp = ha->rsp_q_map[0]; 3731 req = vha->req; 3732 3733 /* Send marker if required */ 3734 if (vha->marker_needed != 0) { 3735 if (qla2x00_marker(vha, ha->base_qpair, 3736 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) 3737 return EXT_STATUS_MAILBOX; 3738 vha->marker_needed = 0; 3739 } 3740 3741 /* Acquire ring specific lock */ 3742 spin_lock_irqsave(&ha->hardware_lock, flags); 3743 3744 handle = qla2xxx_get_next_handle(req); 3745 if (handle == 0) { 3746 rval = EXT_STATUS_BUSY; 3747 goto queuing_error; 3748 } 3749 3750 /* Calculate number of IOCB required */ 3751 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); 3752 3753 /* Check for room on request queue. */ 3754 if (req->cnt < req_cnt + 2) { 3755 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : 3756 RD_REG_DWORD_RELAXED(req->req_q_out); 3757 if (req->ring_index < cnt) 3758 req->cnt = cnt - req->ring_index; 3759 else 3760 req->cnt = req->length - 3761 (req->ring_index - cnt); 3762 } 3763 if (req->cnt < req_cnt + 2) { 3764 rval = EXT_STATUS_BUSY; 3765 goto queuing_error; 3766 } 3767 3768 cmd_pkt = (struct cmd_bidir *)req->ring_ptr; 3769 cmd_pkt->handle = MAKE_HANDLE(req->id, handle); 3770 3771 /* Zero out remaining portion of packet. */ 3772 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/ 3773 clr_ptr = (uint32_t *)cmd_pkt + 2; 3774 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); 3775 3776 /* Set NPORT-ID (of vha)*/ 3777 cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id); 3778 cmd_pkt->port_id[0] = vha->d_id.b.al_pa; 3779 cmd_pkt->port_id[1] = vha->d_id.b.area; 3780 cmd_pkt->port_id[2] = vha->d_id.b.domain; 3781 3782 qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds); 3783 cmd_pkt->entry_status = (uint8_t) rsp->id; 3784 /* Build command packet. */ 3785 req->current_outstanding_cmd = handle; 3786 req->outstanding_cmds[handle] = sp; 3787 sp->handle = handle; 3788 req->cnt -= req_cnt; 3789 3790 /* Send the command to the firmware */ 3791 wmb(); 3792 qla2x00_start_iocbs(vha, req); 3793 queuing_error: 3794 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3795 return rval; 3796 } 3797