/*
 * QLogic FCoE Offload Driver
 * Copyright (c) 2016 Cavium Inc.
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "qedf.h"
#include <scsi/scsi_tcq.h>

void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
	unsigned int timer_msec)
{
	queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
	    msecs_to_jiffies(timer_msec));
}

static void qedf_cmd_timeout(struct work_struct *work)
{

	struct qedf_ioreq *io_req =
	    container_of(work, struct qedf_ioreq, timeout_work.work);
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	struct qedf_rport *fcport = io_req->fcport;
	u8 op = 0;

	switch (io_req->cmd_type) {
	case QEDF_ABTS:
		QEDF_ERR((&qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n",
		    io_req->xid);
		/* Cleanup timed out ABTS */
		qedf_initiate_cleanup(io_req, true);
		complete(&io_req->abts_done);

		/*
		 * Need to call kref_put for reference taken when initiate_abts
		 * was called since abts_compl won't be called now that we've
		 * cleaned up the task.
		 */
		kref_put(&io_req->refcount, qedf_release_cmd);

		/*
		 * Now that the original I/O and the ABTS are complete see
		 * if we need to reconnect to the target.
		 */
		qedf_restart_rport(fcport);
		break;
	case QEDF_ELS:
		kref_get(&io_req->refcount);
		/*
		 * Don't attempt to clean an ELS timeout as any subsequent
		 * ABTS or cleanup requests just hang.  For now just free
		 * the resources of the original I/O and the RRQ.
		 */
		QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
		    io_req->xid);
		io_req->event = QEDF_IOREQ_EV_ELS_TMO;
		/* Call callback function to complete command */
		if (io_req->cb_func && io_req->cb_arg) {
			op = io_req->cb_arg->op;
			io_req->cb_func(io_req->cb_arg);
			io_req->cb_arg = NULL;
		}
		qedf_initiate_cleanup(io_req, true);
		kref_put(&io_req->refcount, qedf_release_cmd);
		break;
	case QEDF_SEQ_CLEANUP:
		QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, "
		    "xid=0x%x.\n", io_req->xid);
		qedf_initiate_cleanup(io_req, true);
		io_req->event = QEDF_IOREQ_EV_ELS_TMO;
		qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
		break;
	default:
		break;
	}
}

void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
{
	struct io_bdt *bdt_info;
	struct qedf_ctx *qedf = cmgr->qedf;
	size_t bd_tbl_sz;
	u16 min_xid = QEDF_MIN_XID;
	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
	int num_ios;
	int i;
	struct qedf_ioreq *io_req;

	num_ios = max_xid - min_xid + 1;

	/* Free fcoe_bdt_ctx structures */
	if (!cmgr->io_bdt_pool)
		goto free_cmd_pool;

	bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		if (bdt_info->bd_tbl) {
			dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz,
			    bdt_info->bd_tbl, bdt_info->bd_tbl_dma);
			bdt_info->bd_tbl = NULL;
		}
	}

	/* Destroy io_bdt pool */
	for (i = 0; i < num_ios; i++) {
		kfree(cmgr->io_bdt_pool[i]);
		cmgr->io_bdt_pool[i] = NULL;
	}

	kfree(cmgr->io_bdt_pool);
	cmgr->io_bdt_pool = NULL;

free_cmd_pool:

	for (i = 0; i < num_ios; i++) {
		io_req = &cmgr->cmds[i];
		kfree(io_req->sgl_task_params);
		kfree(io_req->task_params);
		/* Make sure we free per command sense buffer */
		if (io_req->sense_buffer)
			dma_free_coherent(&qedf->pdev->dev,
			    QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer,
			    io_req->sense_buffer_dma);
		cancel_delayed_work_sync(&io_req->rrq_work);
	}

	/* Free command manager itself */
	vfree(cmgr);
}

static void qedf_handle_rrq(struct work_struct *work)
{
	struct qedf_ioreq *io_req =
	    container_of(work, struct qedf_ioreq, rrq_work.work);

	qedf_send_rrq(io_req);

}

struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
{
	struct qedf_cmd_mgr *cmgr;
	struct io_bdt *bdt_info;
	struct qedf_ioreq *io_req;
	u16 xid;
	int i;
	int num_ios;
	u16 min_xid = QEDF_MIN_XID;
	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);

	/* Make sure num_queues is already set before calling this function */
	if (!qedf->num_queues) {
		QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n");
		return NULL;
	}

	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
		QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and "
		    "max_xid 0x%x.\n", min_xid, max_xid);
		return NULL;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid "
	    "0x%x.\n", min_xid, max_xid);

	num_ios = max_xid - min_xid + 1;

	cmgr = vzalloc(sizeof(struct qedf_cmd_mgr));
	if (!cmgr) {
		QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n");
		return NULL;
	}

	cmgr->qedf = qedf;
	spin_lock_init(&cmgr->lock);

	/*
	 * Initialize I/O request fields.
	 */
	xid = QEDF_MIN_XID;

	for (i = 0; i < num_ios; i++) {
		io_req = &cmgr->cmds[i];
		INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout);

		io_req->xid = xid++;

		INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq);

		/* Allocate DMA memory to hold sense buffer */
		io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
		    QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
		    GFP_KERNEL);
		if (!io_req->sense_buffer)
			goto mem_err;

		/* Allocate task parameters to pass to f/w init functions */
		io_req->task_params = kzalloc(sizeof(*io_req->task_params),
					      GFP_KERNEL);
		if (!io_req->task_params) {
			QEDF_ERR(&(qedf->dbg_ctx),
				 "Failed to allocate task_params for xid=0x%x\n",
				 i);
			goto mem_err;
		}

		/*
		 * Allocate scatter/gather list info to pass to f/w init
		 * functions.
		 */
		io_req->sgl_task_params = kzalloc(
		    sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
		if (!io_req->sgl_task_params) {
			QEDF_ERR(&(qedf->dbg_ctx),
				 "Failed to allocate sgl_task_params for xid=0x%x\n",
				 i);
			goto mem_err;
		}
	}

	/* Allocate pool of io_bdts - one for each qedf_ioreq */
	cmgr->io_bdt_pool = kmalloc_array(num_ios, sizeof(struct io_bdt *),
	    GFP_KERNEL);

	if (!cmgr->io_bdt_pool) {
		QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n");
		goto mem_err;
	}

	for (i = 0; i < num_ios; i++) {
		cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
		    GFP_KERNEL);
		if (!cmgr->io_bdt_pool[i]) {
			QEDF_WARN(&(qedf->dbg_ctx),
				  "Failed to alloc io_bdt_pool[%d].\n", i);
			goto mem_err;
		}
	}

	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
		    QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
		    &bdt_info->bd_tbl_dma, GFP_KERNEL);
		if (!bdt_info->bd_tbl) {
			QEDF_WARN(&(qedf->dbg_ctx),
				  "Failed to alloc bdt_tbl[%d].\n", i);
			goto mem_err;
		}
	}
	atomic_set(&cmgr->free_list_cnt, num_ios);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
	    "cmgr->free_list_cnt=%d.\n",
	    atomic_read(&cmgr->free_list_cnt));

	return cmgr;

mem_err:
	qedf_cmd_mgr_free(cmgr);
	return NULL;
}

struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
{
	struct qedf_ctx *qedf = fcport->qedf;
	struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr;
	struct qedf_ioreq *io_req = NULL;
	struct io_bdt *bd_tbl;
	u16 xid;
	uint32_t free_sqes;
	int i;
	unsigned long flags;

	free_sqes = atomic_read(&fcport->free_sqes);

	if (!free_sqes) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Returning NULL, free_sqes=%d.\n ",
		    free_sqes);
		goto out_failed;
	}

	/* Limit the number of outstanding R/W tasks */
	if ((atomic_read(&fcport->num_active_ios) >=
	    NUM_RW_TASKS_PER_CONNECTION)) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Returning NULL, num_active_ios=%d.\n",
		    atomic_read(&fcport->num_active_ios));
		goto out_failed;
	}

	/* Limit global TIDs so some remain reserved for certain tasks */
	if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Returning NULL, free_list_cnt=%d.\n",
		    atomic_read(&cmd_mgr->free_list_cnt));
		goto out_failed;
	}

	spin_lock_irqsave(&cmd_mgr->lock, flags);
	for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
		io_req = &cmd_mgr->cmds[cmd_mgr->idx];
		cmd_mgr->idx++;
		if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS)
			cmd_mgr->idx = 0;

		/* Check to make sure command was previously freed */
		if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags))
			break;
	}

	if (i == FCOE_PARAMS_NUM_TASKS) {
		spin_unlock_irqrestore(&cmd_mgr->lock, flags);
		goto out_failed;
	}

	set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
	spin_unlock_irqrestore(&cmd_mgr->lock, flags);

	atomic_inc(&fcport->num_active_ios);
	atomic_dec(&fcport->free_sqes);
	xid = io_req->xid;
	atomic_dec(&cmd_mgr->free_list_cnt);

	io_req->cmd_mgr = cmd_mgr;
	io_req->fcport = fcport;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	if (bd_tbl == NULL) {
		QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid);
		kref_put(&io_req->refcount, qedf_release_cmd);
		goto out_failed;
	}
	bd_tbl->io_req = io_req;
	io_req->cmd_type = cmd_type;
	io_req->tm_flags = 0;

	/* Reset sequence offset data */
	io_req->rx_buf_off = 0;
	io_req->tx_buf_off = 0;
	io_req->rx_id = 0xffff;	/* No OX_ID */

	return io_req;

out_failed:
	/* Record failure for stats and return NULL to caller */
	qedf->alloc_failures++;
	return NULL;
}

static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
{
	struct qedf_mp_req *mp_req = &(io_req->mp_req);
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	uint64_t sz = sizeof(struct scsi_sge);

	/* clear tm flags */
	if (mp_req->mp_req_bd) {
		dma_free_coherent(&qedf->pdev->dev, sz,
		    mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
		mp_req->mp_req_bd = NULL;
	}
	if (mp_req->mp_resp_bd) {
		dma_free_coherent(&qedf->pdev->dev, sz,
		    mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma);
		mp_req->mp_resp_bd = NULL;
	}
	if (mp_req->req_buf) {
		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
		    mp_req->req_buf, mp_req->req_buf_dma);
		mp_req->req_buf = NULL;
	}
	if (mp_req->resp_buf) {
		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
		    mp_req->resp_buf, mp_req->resp_buf_dma);
		mp_req->resp_buf = NULL;
	}
}

void qedf_release_cmd(struct kref *ref)
{
	struct qedf_ioreq *io_req =
	    container_of(ref, struct qedf_ioreq, refcount);
	struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
	struct qedf_rport *fcport = io_req->fcport;

	if (io_req->cmd_type == QEDF_ELS ||
	    io_req->cmd_type == QEDF_TASK_MGMT_CMD)
		qedf_free_mp_resc(io_req);

	atomic_inc(&cmd_mgr->free_list_cnt);
	atomic_dec(&fcport->num_active_ios);
	if (atomic_read(&fcport->num_active_ios) < 0)
		QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");

	/* Increment task retry identifier now that the request is released */
	io_req->task_retry_identifier++;

	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
}

static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
	int bd_index)
{
	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
	int frag_size, sg_frags;

	sg_frags = 0;
	while (sg_len) {
		if (sg_len > QEDF_BD_SPLIT_SZ)
			frag_size = QEDF_BD_SPLIT_SZ;
		else
			frag_size = sg_len;
		bd[bd_index + sg_frags].sge_addr.lo = U64_LO(addr);
		bd[bd_index + sg_frags].sge_addr.hi = U64_HI(addr);
		bd[bd_index + sg_frags].sge_len = (uint16_t)frag_size;

		addr += (u64)frag_size;
		sg_frags++;
		sg_len -= frag_size;
	}
	return sg_frags;
}

static int qedf_map_sg(struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct Scsi_Host *host = sc->device->host;
	struct fc_lport *lport = shost_priv(host);
	struct qedf_ctx *qedf = lport_priv(lport);
	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
	struct scatterlist *sg;
	int byte_count = 0;
	int sg_count = 0;
	int bd_count = 0;
	int sg_frags;
	unsigned int sg_len;
	u64 addr, end_addr;
	int i;

	sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
	    scsi_sg_count(sc), sc->sc_data_direction);

	sg = scsi_sglist(sc);

	/*
	 * New condition to send single SGE as cached-SGL with length less
	 * than 64k.
	 */
	if ((sg_count == 1) && (sg_dma_len(sg) <=
	    QEDF_MAX_SGLEN_FOR_CACHESGL)) {
		sg_len = sg_dma_len(sg);
		addr = (u64)sg_dma_address(sg);

		bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
		bd[bd_count].sge_addr.hi = (addr >> 32);
		bd[bd_count].sge_len = (u16)sg_len;

		return ++bd_count;
	}

	scsi_for_each_sg(sc, sg, sg_count, i) {
		sg_len = sg_dma_len(sg);
		addr = (u64)sg_dma_address(sg);
		end_addr = (u64)(addr + sg_len);

		/*
		 * First s/g element in the list so check if the end_addr
		 * is page aligned.  Also check to make sure the length is
		 * at least page size.
		 */
		if ((i == 0) && (sg_count > 1) &&
		    ((end_addr % QEDF_PAGE_SIZE) ||
		    sg_len < QEDF_PAGE_SIZE))
			io_req->use_slowpath = true;
		/*
		 * Last s/g element so check if the start address is page
		 * aligned.
		 */
		else if ((i == (sg_count - 1)) && (sg_count > 1) &&
		    (addr % QEDF_PAGE_SIZE))
			io_req->use_slowpath = true;
		/*
		 * Intermediate s/g element so check if the start and end
		 * addresses are page aligned.
		 */
		else if ((i != 0) && (i != (sg_count - 1)) &&
		    ((addr % QEDF_PAGE_SIZE) || (end_addr % QEDF_PAGE_SIZE)))
			io_req->use_slowpath = true;

		if (sg_len > QEDF_MAX_BD_LEN) {
			sg_frags = qedf_split_bd(io_req, addr, sg_len,
			    bd_count);
		} else {
			sg_frags = 1;
			bd[bd_count].sge_addr.lo = U64_LO(addr);
			bd[bd_count].sge_addr.hi = U64_HI(addr);
			bd[bd_count].sge_len = (uint16_t)sg_len;
		}

		bd_count += sg_frags;
		byte_count += sg_len;
	}

	if (byte_count != scsi_bufflen(sc))
		QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
		    "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count,
		    scsi_bufflen(sc), io_req->xid);

	return bd_count;
}

static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
	int bd_count;

	if (scsi_sg_count(sc)) {
		bd_count = qedf_map_sg(io_req);
		if (bd_count == 0)
			return -ENOMEM;
	} else {
		bd_count = 0;
		bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
		bd[0].sge_len = 0;
	}
	io_req->bd_tbl->bd_valid = bd_count;

	return 0;
}

static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
				struct fcp_cmnd *fcp_cmnd)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;

	/* fcp_cmnd is 32 bytes */
	memset(fcp_cmnd, 0, FCP_CMND_LEN);

	/* 8 bytes: SCSI LUN info */
	int_to_scsilun(sc_cmd->device->lun,
			(struct scsi_lun *)&fcp_cmnd->fc_lun);

	/* 4 bytes: flag info */
	fcp_cmnd->fc_pri_ta = 0;
	fcp_cmnd->fc_tm_flags = io_req->tm_flags;
	fcp_cmnd->fc_flags = io_req->io_req_flags;
	fcp_cmnd->fc_cmdref = 0;

	/* Populate data direction */
	if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
		fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
	} else {
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
			fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
		else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
			fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
	}

	fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;

	/* 16 bytes: CDB information */
	if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
		memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);

	/* 4 bytes: FCP data length */
	fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
}

static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
	struct qedf_ioreq *io_req,
	struct fcoe_task_context *task_ctx,
	struct fcoe_wqe *sqe)
{
	enum fcoe_task_type task_type;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct io_bdt *bd_tbl = io_req->bd_tbl;
	u8 fcp_cmnd[32];
	u32 tmp_fcp_cmnd[8];
	int bd_count = 0;
	struct qedf_ctx *qedf = fcport->qedf;
	uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
	struct regpair sense_data_buffer_phys_addr;
	u32 tx_io_size = 0;
	u32 rx_io_size = 0;
	int i, cnt;

	/* Note init_initiator_rw_fcoe_task memsets the task context */
	io_req->task = task_ctx;
	memset(task_ctx, 0, sizeof(struct fcoe_task_context));
	memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
	memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));

	/* Set task type based on DMA direction of command */
	if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
		task_type = FCOE_TASK_TYPE_READ_INITIATOR;
	} else {
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
			tx_io_size = io_req->data_xfer_len;
		} else {
			task_type = FCOE_TASK_TYPE_READ_INITIATOR;
			rx_io_size = io_req->data_xfer_len;
		}
	}

	/* Setup the fields for fcoe_task_params */
	io_req->task_params->context = task_ctx;
	io_req->task_params->sqe = sqe;
	io_req->task_params->task_type = task_type;
	io_req->task_params->tx_io_size = tx_io_size;
	io_req->task_params->rx_io_size = rx_io_size;
	io_req->task_params->conn_cid = fcport->fw_cid;
	io_req->task_params->itid = io_req->xid;
	io_req->task_params->cq_rss_number = cq_idx;
	io_req->task_params->is_tape_device = fcport->dev_type;

	/* Fill in information for scatter/gather list */
	if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
		bd_count = bd_tbl->bd_valid;
		io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
		io_req->sgl_task_params->sgl_phys_addr.lo =
			U64_LO(bd_tbl->bd_tbl_dma);
		io_req->sgl_task_params->sgl_phys_addr.hi =
			U64_HI(bd_tbl->bd_tbl_dma);
		io_req->sgl_task_params->num_sges = bd_count;
		io_req->sgl_task_params->total_buffer_size =
		    scsi_bufflen(io_req->sc_cmd);
		io_req->sgl_task_params->small_mid_sge =
			io_req->use_slowpath;
	}

	/* Fill in physical address of sense buffer */
	sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
	sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);

	/* fill FCP_CMND IU */
	qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);

	/* Swap fcp_cmnd since FC is big endian */
	cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
	for (i = 0; i < cnt; i++) {
		tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
	}
	memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));

	init_initiator_rw_fcoe_task(io_req->task_params,
				    io_req->sgl_task_params,
				    sense_data_buffer_phys_addr,
				    io_req->task_retry_identifier, fcp_cmnd);

	/* Increment SGL type counters */
	if (bd_count == 1) {
		qedf->single_sge_ios++;
		io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
	} else if (io_req->use_slowpath) {
		qedf->slow_sge_ios++;
		io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
	} else {
		qedf->fast_sge_ios++;
		io_req->sge_type = QEDF_IOREQ_FAST_SGE;
	}
}

void qedf_init_mp_task(struct qedf_ioreq *io_req,
	struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
{
	struct qedf_mp_req *mp_req = &(io_req->mp_req);
	struct qedf_rport *fcport = io_req->fcport;
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	struct fc_frame_header *fc_hdr;
	struct fcoe_tx_mid_path_params task_fc_hdr;
	struct scsi_sgl_task_params tx_sgl_task_params;
	struct scsi_sgl_task_params rx_sgl_task_params;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
		  "Initializing MP task for cmd_type=%d\n",
		  io_req->cmd_type);

	qedf->control_requests++;

	memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
	memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
	memset(task_ctx, 0, sizeof(struct fcoe_task_context));
	memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));

	/* Setup the task from io_req for easy reference */
	io_req->task = task_ctx;

	/* Setup the fields for fcoe_task_params */
	io_req->task_params->context = task_ctx;
	io_req->task_params->sqe = sqe;
	io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
	io_req->task_params->tx_io_size = io_req->data_xfer_len;
	/* rx_io_size tells the f/w how large a response buffer we have */
	io_req->task_params->rx_io_size = PAGE_SIZE;
	io_req->task_params->conn_cid = fcport->fw_cid;
	io_req->task_params->itid = io_req->xid;
	/* Return middle path commands on CQ 0 */
	io_req->task_params->cq_rss_number = 0;
	io_req->task_params->is_tape_device = fcport->dev_type;

	fc_hdr = &(mp_req->req_fc_hdr);
	/* Set OX_ID and RX_ID based on driver task id */
	fc_hdr->fh_ox_id = io_req->xid;
	fc_hdr->fh_rx_id = htons(0xffff);

	/* Set up FC header information */
	task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
	task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
	task_fc_hdr.type = fc_hdr->fh_type;
	task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
	task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
	task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
	task_fc_hdr.ox_id = fc_hdr->fh_ox_id;

	/* Set up s/g list parameters for request buffer */
	tx_sgl_task_params.sgl = mp_req->mp_req_bd;
	tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
	tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
	tx_sgl_task_params.num_sges = 1;
	/* Set PAGE_SIZE for now since sg element is that size ??? */
	tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
	tx_sgl_task_params.small_mid_sge = 0;

	/* Set up s/g list parameters for response buffer */
	rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
	rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
	rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
	rx_sgl_task_params.num_sges = 1;
	/* Set PAGE_SIZE for now since sg element is that size ??? */
	rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
	rx_sgl_task_params.small_mid_sge = 0;

	/*
	 * Last arg is 0 as previous code did not set that we wanted the
	 * fc header information.
	 */
	init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
						     &task_fc_hdr,
						     &tx_sgl_task_params,
						     &rx_sgl_task_params, 0);

	/* Midpath requests always consume 1 SGE */
	qedf->single_sge_ios++;
}

/* Presumed that fcport->rport_lock is held */
u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
{
	uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
	u16 rval;

	rval = fcport->sq_prod_idx;

	/* Adjust ring index */
	fcport->sq_prod_idx++;
	fcport->fw_sq_prod_idx++;
	if (fcport->sq_prod_idx == total_sqe)
		fcport->sq_prod_idx = 0;

	return rval;
}

void qedf_ring_doorbell(struct qedf_rport *fcport)
{
	struct fcoe_db_data dbell = { 0 };

	dbell.agg_flags = 0;

	dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT;
	dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT;
	dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD <<
	    FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;

	dbell.sq_prod = fcport->fw_sq_prod_idx;
	writel(*(u32 *)&dbell, fcport->p_doorbell);
	/* Make sure SQ index is updated so f/w processes requests in order */
	wmb();
	mmiowb();
}

static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
			  int8_t direction)
{
	struct qedf_ctx *qedf = fcport->qedf;
	struct qedf_io_log *io_log;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	unsigned long flags;
	uint8_t op;

	spin_lock_irqsave(&qedf->io_trace_lock, flags);

	io_log = &qedf->io_trace_buf[qedf->io_trace_idx];
	io_log->direction = direction;
	io_log->task_id = io_req->xid;
	io_log->port_id = fcport->rdata->ids.port_id;
	io_log->lun = sc_cmd->device->lun;
	io_log->op = op = sc_cmd->cmnd[0];
	io_log->lba[0] = sc_cmd->cmnd[2];
	io_log->lba[1] = sc_cmd->cmnd[3];
	io_log->lba[2] = sc_cmd->cmnd[4];
	io_log->lba[3] = sc_cmd->cmnd[5];
	io_log->bufflen = scsi_bufflen(sc_cmd);
	io_log->sg_count = scsi_sg_count(sc_cmd);
	io_log->result = sc_cmd->result;
	io_log->jiffies = jiffies;
	io_log->refcount = kref_read(&io_req->refcount);

	if (direction == QEDF_IO_TRACE_REQ) {
		/* For requests we only care about the submission CPU */
		io_log->req_cpu = io_req->cpu;
		io_log->int_cpu = 0;
		io_log->rsp_cpu = 0;
	} else if (direction == QEDF_IO_TRACE_RSP) {
		io_log->req_cpu = io_req->cpu;
		io_log->int_cpu = io_req->int_cpu;
		io_log->rsp_cpu = smp_processor_id();
	}

	io_log->sge_type = io_req->sge_type;

	qedf->io_trace_idx++;
	if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE)
		qedf->io_trace_idx = 0;

	spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
}

int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct Scsi_Host *host = sc_cmd->device->host;
	struct fc_lport *lport = shost_priv(host);
	struct qedf_ctx *qedf = lport_priv(lport);
	struct fcoe_task_context *task_ctx;
	u16 xid;
	enum fcoe_task_type req_type = 0;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	/* Initialize rest of io_req fields */
	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
	sc_cmd->SCp.ptr = (char *)io_req;
	io_req->use_slowpath = false; /* Assume fast SGL by default */

	/* Record which cpu this request is associated with */
	io_req->cpu = smp_processor_id();

	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		req_type = FCOE_TASK_TYPE_READ_INITIATOR;
		io_req->io_req_flags = QEDF_READ;
		qedf->input_requests++;
	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
		req_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
		io_req->io_req_flags = QEDF_WRITE;
		qedf->output_requests++;
	} else {
		io_req->io_req_flags = 0;
		qedf->control_requests++;
	}

	xid = io_req->xid;

	/* Build buffer descriptor list for firmware from sg list */
	if (qedf_build_bd_list_from_sg(io_req)) {
		QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
		kref_put(&io_req->refcount, qedf_release_cmd);
		return -EAGAIN;
	}

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
		kref_put(&io_req->refcount, qedf_release_cmd);
	}

	/* Obtain free SQE */
	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	/* Get the task context */
	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
	if (!task_ctx) {
		QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
		    xid);
		kref_put(&io_req->refcount, qedf_release_cmd);
		return -EINVAL;
	}

	qedf_init_task(fcport, lport, io_req, task_ctx, sqe);

	/* Ring doorbell */
	qedf_ring_doorbell(fcport);

	if (qedf_io_tracing && io_req->sc_cmd)
		qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);

	return false;
}

int
qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
{
	struct fc_lport *lport = shost_priv(host);
	struct qedf_ctx *qedf = lport_priv(lport);
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct qedf_rport *fcport = rport->dd_data;
	struct qedf_ioreq *io_req;
	int rc = 0;
	int rval;
	unsigned long flags = 0;

	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
		sc_cmd->result = DID_NO_CONNECT << 16;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		sc_cmd->result = rval;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	/* Retry command if we are doing a qed drain operation */
	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	if (lport->state != LPORT_ST_READY ||
	    atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	/* rport and tgt are allocated together, so tgt should be non-NULL */
	fcport = (struct qedf_rport *)&rp[1];

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		/*
		 * Session is not offloaded yet. Let SCSI-ml retry
		 * the command.
		 */
		rc = SCSI_MLQUEUE_TARGET_BUSY;
		goto exit_qcmd;
	}
	if (fcport->retry_delay_timestamp) {
		if (time_after(jiffies, fcport->retry_delay_timestamp)) {
			fcport->retry_delay_timestamp = 0;
		} else {
			/* If retry_delay timer is active, flow off the ML */
			rc = SCSI_MLQUEUE_TARGET_BUSY;
			goto exit_qcmd;
		}
	}

	io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
	if (!io_req) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	io_req->sc_cmd = sc_cmd;

	/* Take fcport->rport_lock for posting to fcport send queue */
	spin_lock_irqsave(&fcport->rport_lock, flags);
	if (qedf_post_io_req(fcport, io_req)) {
		QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
		/* Return SQE to pool */
		atomic_inc(&fcport->free_sqes);
		rc = SCSI_MLQUEUE_HOST_BUSY;
	}
	spin_unlock_irqrestore(&fcport->rport_lock, flags);

exit_qcmd:
	return rc;
}

static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
			       struct fcoe_cqe_rsp_info *fcp_rsp)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	u8 rsp_flags = fcp_rsp->rsp_flags.flags;
	int fcp_sns_len = 0;
	int fcp_rsp_len = 0;
	uint8_t *rsp_info, *sense_data;

	io_req->fcp_status = FC_GOOD;
	io_req->fcp_resid = 0;
	if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
	    FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
		io_req->fcp_resid = fcp_rsp->fcp_resid;

	io_req->scsi_comp_flags = rsp_flags;
	CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
	    fcp_rsp->scsi_status_code;

	if (rsp_flags &
	    FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID)
		fcp_rsp_len = fcp_rsp->fcp_rsp_len;

	if (rsp_flags &
	    FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID)
		fcp_sns_len = fcp_rsp->fcp_sns_len;

	io_req->fcp_rsp_len = fcp_rsp_len;
	io_req->fcp_sns_len = fcp_sns_len;
	rsp_info = sense_data = io_req->sense_buffer;

	/* fetch fcp_rsp_code */
	if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
		/* Only for task management function */
		io_req->fcp_rsp_code = rsp_info[3];
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "fcp_rsp_code = %d\n", io_req->fcp_rsp_code);
		/* Adjust sense-data location. */
		sense_data += fcp_rsp_len;
	}

	if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Truncating sense buffer\n");
		fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
	}

	memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	if (fcp_sns_len)
		memcpy(sc_cmd->sense_buffer, sense_data,
		    fcp_sns_len);
}

static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;

	if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
		dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc),
		    scsi_sg_count(sc), sc->sc_data_direction);
		io_req->bd_tbl->bd_valid = 0;
	}
}

void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	u16 xid, rval;
	struct fcoe_task_context *task_ctx;
	struct scsi_cmnd *sc_cmd;
	struct fcoe_cqe_rsp_info *fcp_rsp;
	struct qedf_rport *fcport;
	int refcount;
	u16 scope, qualifier = 0;
	u8 fw_residual_flag = 0;

	if (!io_req)
		return;
	if (!cqe)
		return;

	xid = io_req->xid;
	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
	sc_cmd = io_req->sc_cmd;
	fcp_rsp = &cqe->cqe_info.rsp_info;

	if (!sc_cmd) {
		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
		return;
	}

	if (!sc_cmd->SCp.ptr) {
		QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
		    "another context.\n");
		return;
	}

	if (!sc_cmd->request) {
		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
		    "sc_cmd=%p.\n", sc_cmd);
		return;
	}

	if (!sc_cmd->request->special) {
		QEDF_WARN(&(qedf->dbg_ctx), "request->special is NULL so "
		    "request not valid, sc_cmd=%p.\n", sc_cmd);
		return;
	}

	if (!sc_cmd->request->q) {
		QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
		    "is not valid, sc_cmd=%p.\n", sc_cmd);
		return;
	}

	fcport = io_req->fcport;

	qedf_parse_fcp_rsp(io_req, fcp_rsp);

	qedf_unmap_sg_list(qedf, io_req);

	/* Check for FCP transport error */
	if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d "
		    "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len,
		    io_req->fcp_rsp_code);
		sc_cmd->result = DID_BUS_BUSY << 16;
		goto out;
	}

	fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
	    FCOE_CQE_RSP_INFO_FW_UNDERRUN);
	if (fw_residual_flag) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x "
		    "fcp_resid=%d fw_residual=0x%x.\n", io_req->xid,
		    fcp_rsp->rsp_flags.flags, io_req->fcp_resid,
		    cqe->cqe_info.rsp_info.fw_residual);

		if (io_req->cdb_status == 0)
			sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
		else
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;

		/* Abort the command since we did not get all the data */
		init_completion(&io_req->abts_done);
		rval = qedf_initiate_abts(io_req, true);
		if (rval) {
			QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
			sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
		}

		/*
		 * Set resid to the whole buffer length so we won't try to
		 * reuse any previously read data.
		 */
		scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
		goto out;
	}

	switch (io_req->fcp_status) {
	case FC_GOOD:
		if (io_req->cdb_status == 0) {
			/* Good I/O completion */
			sc_cmd->result = DID_OK << 16;
		} else {
			refcount = kref_read(&io_req->refcount);
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
			    "%d:0:%d:%lld xid=0x%0x op=0x%02x "
			    "lba=%02x%02x%02x%02x cdb_status=%d "
			    "fcp_resid=0x%x refcount=%d.\n",
			    qedf->lport->host->host_no, sc_cmd->device->id,
			    sc_cmd->device->lun, io_req->xid,
			    sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3],
			    sc_cmd->cmnd[4], sc_cmd->cmnd[5],
			    io_req->cdb_status, io_req->fcp_resid,
			    refcount);
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;

			if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
			    io_req->cdb_status == SAM_STAT_BUSY) {
				/*
				 * Check whether we need to set retry_delay at
				 * all based on retry_delay module parameter
				 * and the status qualifier.
				 */

				/* Upper 2 bits */
				scope = fcp_rsp->retry_delay_timer & 0xC000;
				/* Lower 14 bits */
				qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;

				if (qedf_retry_delay &&
				    scope > 0 && qualifier > 0 &&
				    qualifier <= 0x3FEF) {
					/* Check we don't go over the max */
					if (qualifier > QEDF_RETRY_DELAY_MAX)
						qualifier =
						    QEDF_RETRY_DELAY_MAX;
					fcport->retry_delay_timestamp =
					    jiffies + (qualifier * HZ / 10);
				}
			}
		}
		if (io_req->fcp_resid)
			scsi_set_resid(sc_cmd, io_req->fcp_resid);
		break;
	default:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
		    io_req->fcp_status);
		break;
	}

out:
	if (qedf_io_tracing)
		qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);

	io_req->sc_cmd = NULL;
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	kref_put(&io_req->refcount, qedf_release_cmd);
}

/* Return a SCSI command in some other context besides a normal completion */
void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
	int result)
{
	u16 xid;
	struct scsi_cmnd *sc_cmd;
	int refcount;

	if (!io_req)
		return;

	xid = io_req->xid;
	sc_cmd = io_req->sc_cmd;

	if (!sc_cmd) {
		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
		return;
	}

	if (!sc_cmd->SCp.ptr) {
		QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
		    "another context.\n");
		return;
	}

	qedf_unmap_sg_list(qedf, io_req);

	sc_cmd->result = result << 16;
	refcount = kref_read(&io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
	    "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
	    "allowed=%d retries=%d refcount=%d.\n",
	    qedf->lport->host->host_no, sc_cmd->device->id,
	    sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0],
	    sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4],
	    sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries,
	    refcount);

	/*
	 * Set resid to the whole buffer length so we won't try to reuse any
	 * previously read data.
	 */
	scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));

	if (qedf_io_tracing)
		qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);

	io_req->sc_cmd = NULL;
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	kref_put(&io_req->refcount, qedf_release_cmd);
}

/*
 * Handle warning type CQE completions. This is mainly used for REC timer
 * popping.
 */
void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	int rval, i;
	struct qedf_rport *fcport = io_req->fcport;
	u64 err_warn_bit_map;
	u8 err_warn = 0xff;

	if (!cqe)
		return;

	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
	    "xid=0x%x\n", io_req->xid);
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
	    "err_warn_bitmap=%08x:%08x\n",
	    le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
	    le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
	    "rx_buff_off=%08x, rx_id=%04x\n",
	    le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
	    le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
	    le32_to_cpu(cqe->cqe_info.err_info.rx_id));

	/* Normalize the error bitmap value to just an unsigned int */
	err_warn_bit_map = (u64)
	    ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) |
	    (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo;
	for (i = 0; i < 64; i++) {
		if (err_warn_bit_map & (u64)((u64)1 << i)) {
			err_warn = i;
			break;
		}
	}

	/* Check if REC TOV expired if this is a tape device */
	if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
		if (err_warn ==
		    FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) {
			QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n");
			if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) {
				io_req->rx_buf_off =
				    cqe->cqe_info.err_info.rx_buf_off;
				io_req->tx_buf_off =
				    cqe->cqe_info.err_info.tx_buf_off;
				io_req->rx_id = cqe->cqe_info.err_info.rx_id;
				rval = qedf_send_rec(io_req);
				/*
				 * We only want to abort the io_req if we
				 * can't queue the REC command as we want to
				 * keep the exchange open for recovery.
				 */
				if (rval)
					goto send_abort;
			}
			return;
		}
	}

send_abort:
	init_completion(&io_req->abts_done);
	rval = qedf_initiate_abts(io_req, true);
	if (rval)
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
}

/* Cleanup a command when we receive an error detection completion */
void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	int rval;

	if (!cqe)
		return;

	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
	    "xid=0x%x\n", io_req->xid);
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
	    "err_warn_bitmap=%08x:%08x\n",
	    le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
	    le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
	    "rx_buff_off=%08x, rx_id=%04x\n",
	    le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
	    le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
	    le32_to_cpu(cqe->cqe_info.err_info.rx_id));

	if (qedf->stop_io_on_error) {
		qedf_stop_all_io(qedf);
		return;
	}

	init_completion(&io_req->abts_done);
	rval = qedf_initiate_abts(io_req, true);
	if (rval)
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
}

static void qedf_flush_els_req(struct qedf_ctx *qedf,
	struct qedf_ioreq *els_req)
{
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
	    "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid,
	    kref_read(&els_req->refcount));

	/*
	 * Need to distinguish this from a timeout when calling the
	 * els_req->cb_func.
	 */
	els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;

	/* Cancel the timer */
	cancel_delayed_work_sync(&els_req->timeout_work);

	/* Call callback function to complete command */
	if (els_req->cb_func && els_req->cb_arg) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	/* Release kref for original initiate_els */
	kref_put(&els_req->refcount, qedf_release_cmd);
}

/* A value of -1 for lun is a wild card that means flush all
 * active SCSI I/Os for the target.
 */
void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
{
	struct qedf_ioreq *io_req;
	struct qedf_ctx *qedf;
	struct qedf_cmd_mgr *cmd_mgr;
	int i, rc;

	if (!fcport)
		return;

	qedf = fcport->qedf;
	cmd_mgr = qedf->cmd_mgr;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Flush active i/o's.\n");

	for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
		io_req = &cmd_mgr->cmds[i];

		if (!io_req)
			continue;
		if (io_req->fcport != fcport)
			continue;
		if (io_req->cmd_type == QEDF_ELS) {
			rc = kref_get_unless_zero(&io_req->refcount);
			if (!rc) {
				QEDF_ERR(&(qedf->dbg_ctx),
				    "Could not get kref for io_req=0x%p.\n",
				    io_req);
				continue;
			}
			qedf_flush_els_req(qedf, io_req);
			/*
			 * Release the kref and go back to the top of the
			 * loop.
			 */
			goto free_cmd;
		}

		if (!io_req->sc_cmd)
			continue;
		if (lun > 0) {
			if (io_req->sc_cmd->device->lun !=
			    (u64)lun)
				continue;
		}

		/*
		 * Use kref_get_unless_zero in the unlikely case the command
		 * we're about to flush was completed in the normal SCSI path
		 */
		rc = kref_get_unless_zero(&io_req->refcount);
		if (!rc) {
			QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for "
			    "io_req=0x%p\n", io_req);
			continue;
		}
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Cleanup xid=0x%x.\n", io_req->xid);

		/* Cleanup task and return I/O mid-layer */
		qedf_initiate_cleanup(io_req, true);

free_cmd:
		kref_put(&io_req->refcount, qedf_release_cmd);
	}
}

/*
 * Initiate an ABTS middle path command. Note that we don't have to initialize
 * the task context for an ABTS task.
 */
int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
{
	struct fc_lport *lport;
	struct qedf_rport *fcport = io_req->fcport;
	struct fc_rport_priv *rdata = fcport->rdata;
	struct qedf_ctx *qedf = fcport->qedf;
	u16 xid;
	u32 r_a_tov = 0;
	int rc = 0;
	unsigned long flags;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	r_a_tov = rdata->r_a_tov;
	lport = qedf->lport;

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "tgt not offloaded\n");
		rc = 1;
		goto abts_err;
	}

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
		rc = 1;
		goto abts_err;
	}

	if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
		QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
		rc = 1;
		goto abts_err;
	}

	/* Ensure room on SQ */
	if (!atomic_read(&fcport->free_sqes)) {
		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
		rc = 1;
		goto abts_err;
	}

	kref_get(&io_req->refcount);

	xid = io_req->xid;
	qedf->control_requests++;
	qedf->packet_aborts++;

	/* Set the return CPU to be the same as the request one */
	io_req->cpu = smp_processor_id();

	/* Set the command type to abort */
	io_req->cmd_type = QEDF_ABTS;
	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;

	set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "ABTS io_req xid = "
	    "0x%x\n", xid);

	qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT * HZ);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	io_req->task_params->sqe = sqe;

	init_initiator_abort_fcoe_task(io_req->task_params);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	return rc;
abts_err:
	/*
	 * If the ABTS task fails to queue then we need to cleanup the
	 * task at the firmware.
	 */
	qedf_initiate_cleanup(io_req, return_scsi_cmd_on_abts);
	return rc;
}

void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	uint32_t r_ctl;
	uint16_t xid;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
	    "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);

	cancel_delayed_work(&io_req->timeout_work);

	xid = io_req->xid;
	r_ctl = cqe->cqe_info.abts_info.r_ctl;

	switch (r_ctl) {
	case FC_RCTL_BA_ACC:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
		    "ABTS response - ACC Send RRQ after R_A_TOV\n");
		io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
		/*
		 * Don't release this cmd yet. It will be released
		 * after we get RRQ response.
		 */
		kref_get(&io_req->refcount);
		queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
		    msecs_to_jiffies(qedf->lport->r_a_tov));
		break;
	/* For error cases let the cleanup return the command */
	case FC_RCTL_BA_RJT:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
		    "ABTS response - RJT\n");
		io_req->event = QEDF_IOREQ_EV_ABORT_FAILED;
		break;
	default:
		QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n");
		break;
	}

	clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);

	if (io_req->sc_cmd) {
		if (io_req->return_scsi_cmd_on_abts)
			qedf_scsi_done(qedf, io_req, DID_ERROR);
	}

	/* Notify eh_abort handler that ABTS is complete */
	complete(&io_req->abts_done);

	kref_put(&io_req->refcount, qedf_release_cmd);
}

int qedf_init_mp_req(struct qedf_ioreq *io_req)
{
	struct qedf_mp_req *mp_req;
	struct scsi_sge *mp_req_bd;
	struct scsi_sge *mp_resp_bd;
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	dma_addr_t addr;
	uint64_t sz;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n");

	mp_req = (struct qedf_mp_req *)&(io_req->mp_req);
	memset(mp_req, 0, sizeof(struct qedf_mp_req));

	if (io_req->cmd_type != QEDF_ELS) {
		mp_req->req_len = sizeof(struct fcp_cmnd);
		io_req->data_xfer_len = mp_req->req_len;
	} else
		mp_req->req_len = io_req->data_xfer_len;

	mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
	    &mp_req->req_buf_dma, GFP_KERNEL);
	if (!mp_req->req_buf) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev,
	    QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL);
	if (!mp_req->resp_buf) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp "
		    "buffer\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	/* Allocate and map mp_req_bd and mp_resp_bd */
	sz = sizeof(struct scsi_sge);
	mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
	    &mp_req->mp_req_bd_dma, GFP_KERNEL);
	if (!mp_req->mp_req_bd) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
	    &mp_req->mp_resp_bd_dma, GFP_KERNEL);
	if (!mp_req->mp_resp_bd) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	/* Fill bd table */
	addr = mp_req->req_buf_dma;
	mp_req_bd = mp_req->mp_req_bd;
	mp_req_bd->sge_addr.lo = U64_LO(addr);
	mp_req_bd->sge_addr.hi = U64_HI(addr);
	mp_req_bd->sge_len = QEDF_PAGE_SIZE;

	/*
	 * MP buffer is either a task mgmt command or an ELS.
	 * So the assumption is that it consumes a single bd
	 * entry in the bd table
	 */
	mp_resp_bd = mp_req->mp_resp_bd;
	addr = mp_req->resp_buf_dma;
	mp_resp_bd->sge_addr.lo = U64_LO(addr);
	mp_resp_bd->sge_addr.hi = U64_HI(addr);
	mp_resp_bd->sge_len = QEDF_PAGE_SIZE;

	return 0;
}

/*
 * Last ditch effort to clear the port if it's stuck. Used only after a
 * cleanup task times out.
 */
static void qedf_drain_request(struct qedf_ctx *qedf)
{
	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n");
		return;
	}

	/* Set bit to return all queuecommand requests as busy */
	set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);

	/* Call qed drain request for function. Should be synchronous */
	qed_ops->common->drain(qedf->cdev);

	/* Settle time for CQEs to be returned */
	msleep(100);

	/* Unplug and continue */
	clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
}

/*
 * Returns SUCCESS if the cleanup task does not timeout, otherwise return
 * FAILURE.
 */
int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
	bool return_scsi_cmd_on_abts)
{
	struct qedf_rport *fcport;
	struct qedf_ctx *qedf;
	uint16_t xid;
	struct fcoe_task_context *task;
	int tmo = 0;
	int rc = SUCCESS;
	unsigned long flags;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	fcport = io_req->fcport;
	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL.\n");
		return SUCCESS;
	}

	qedf = fcport->qedf;
	if (!qedf) {
		QEDF_ERR(NULL, "qedf is NULL.\n");
		return SUCCESS;
	}

	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
		    "cleanup processing or already completed.\n",
		    io_req->xid);
		return SUCCESS;
	}

	/* Ensure room on SQ */
	if (!atomic_read(&fcport->free_sqes)) {
		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
		return FAILED;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid=0x%x\n",
	    io_req->xid);

	/* Cleanup cmds re-use the same TID as the original I/O */
	xid = io_req->xid;
	io_req->cmd_type = QEDF_CLEANUP;
	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;

	/* Set the return CPU to be the same as the request one */
	io_req->cpu = smp_processor_id();

	set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);

	task = qedf_get_task_mem(&qedf->tasks, xid);

	init_completion(&io_req->tm_done);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	io_req->task_params->sqe = sqe;

	init_initiator_cleanup_fcoe_task(io_req->task_params);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	tmo = wait_for_completion_timeout(&io_req->tm_done,
	    QEDF_CLEANUP_TIMEOUT * HZ);

	if (!tmo) {
		rc = FAILED;
		/* Timeout case */
		QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, "
		    "xid=%x.\n", io_req->xid);
		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
		/* Issue a drain request if cleanup task times out */
		QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n");
		qedf_drain_request(qedf);
	}

	if (io_req->sc_cmd) {
		if (io_req->return_scsi_cmd_on_abts)
			qedf_scsi_done(qedf, io_req, DID_ERROR);
	}

	if (rc == SUCCESS)
		io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;
	else
		io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;

	return rc;
}

void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n",
	    io_req->xid);

	clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);

	/* Complete so we can finish cleaning up the I/O */
	complete(&io_req->tm_done);
}

static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
	uint8_t tm_flags)
{
	struct qedf_ioreq *io_req;
	struct fcoe_task_context *task;
	struct qedf_ctx *qedf = fcport->qedf;
	struct fc_lport *lport = qedf->lport;
	int rc = 0;
	uint16_t xid;
	int tmo = 0;
	unsigned long flags;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	if (!sc_cmd) {
		QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n");
		return FAILED;
	}

	if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
		QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
		rc = FAILED;
		return FAILED;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "portid = 0x%x "
	    "tm_flags = %d\n", fcport->rdata->ids.port_id, tm_flags);

	io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
	if (!io_req) {
		QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF");
		rc = -EAGAIN;
		goto reset_tmf_err;
	}

	/* Initialize rest of io_req fields */
	io_req->sc_cmd = sc_cmd;
	io_req->fcport = fcport;
	io_req->cmd_type = QEDF_TASK_MGMT_CMD;

	/* Set the return CPU to be the same as the request one */
	io_req->cpu = smp_processor_id();

	/* Set TM flags */
	io_req->io_req_flags = QEDF_READ;
	io_req->data_xfer_len = 0;
	io_req->tm_flags = tm_flags;

	/* Default is to return a SCSI command when an error occurs */
	io_req->return_scsi_cmd_on_abts = true;

	/* Obtain exchange id */
	xid = io_req->xid;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = "
	    "0x%x\n", xid);

	/* Initialize task context for this IO request */
	task = qedf_get_task_mem(&qedf->tasks, xid);

	init_completion(&io_req->tm_done);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	qedf_init_task(fcport, lport, io_req, task, sqe);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	tmo = wait_for_completion_timeout(&io_req->tm_done,
	    QEDF_TM_TIMEOUT * HZ);

	if (!tmo) {
		rc = FAILED;
		QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
	} else {
		/* Check TMF response code */
		if (io_req->fcp_rsp_code == 0)
			rc = SUCCESS;
		else
			rc = FAILED;
	}

	if (tm_flags == FCP_TMF_LUN_RESET)
		qedf_flush_active_ios(fcport, (int)sc_cmd->device->lun);
	else
		qedf_flush_active_ios(fcport, -1);

	kref_put(&io_req->refcount, qedf_release_cmd);

	if (rc != SUCCESS) {
		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
		rc = FAILED;
	} else {
		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
		rc = SUCCESS;
	}
reset_tmf_err:
	return rc;
}

int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
	struct qedf_ctx *qedf;
	struct fc_lport *lport;
	int rc = SUCCESS;
	int rval;

	rval = fc_remote_port_chkready(rport);

	if (rval) {
		QEDF_ERR(NULL, "device_reset rport not ready\n");
		rc = FAILED;
		goto tmf_err;
	}

	if (fcport == NULL) {
		QEDF_ERR(NULL, "device_reset: rport is NULL\n");
		rc = FAILED;
		goto tmf_err;
	}

	qedf = fcport->qedf;
	lport = qedf->lport;

	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
		rc = SUCCESS;
		goto tmf_err;
	}

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
		rc = FAILED;
		goto tmf_err;
	}

	rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);

tmf_err:
	return rc;
}

void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	struct fcoe_cqe_rsp_info *fcp_rsp;

	fcp_rsp = &cqe->cqe_info.rsp_info;
	qedf_parse_fcp_rsp(io_req, fcp_rsp);

	io_req->sc_cmd = NULL;
	complete(&io_req->tm_done);
}

void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
	struct fcoe_cqe *cqe)
{
	unsigned long flags;
	uint16_t tmp;
	uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
	u32 payload_len, crc;
	struct fc_frame_header *fh;
	struct fc_frame *fp;
	struct qedf_io_work *io_work;
	u32 bdq_idx;
	void *bdq_addr;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
	    "address.hi=%x address.lo=%x opaque_data.hi=%x "
	    "opaque_data.lo=%x bdq_prod_idx=%u len=%u.\n",
	    le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.hi),
	    le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.lo),
	    le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.hi),
	    le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo),
	    qedf->bdq_prod_idx, pktlen);

	bdq_idx = le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo);
	if (bdq_idx >= QEDF_BDQ_SIZE) {
		QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
		    bdq_idx);
		goto increment_prod;
	}

	bdq_addr = qedf->bdq[bdq_idx].buf_addr;
	if (!bdq_addr) {
		QEDF_ERR(&(qedf->dbg_ctx), "bdq_addr is NULL, dropping "
		    "unsolicited packet.\n");
		goto increment_prod;
	}

	if (qedf_dump_frames) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
		    "BDQ frame is at addr=%p.\n", bdq_addr);
		print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1,
		    (void *)bdq_addr, pktlen, false);
	}

	/* Allocate frame */
	payload_len = pktlen - sizeof(struct fc_frame_header);
	fp = fc_frame_alloc(qedf->lport, payload_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n");
		goto increment_prod;
	}

	/* Copy data from BDQ buffer into fc_frame struct */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, (void *)bdq_addr, pktlen);

	/* Initialize the frame so libfc sees it as a valid frame */
	crc = fcoe_fc_crc(fp);
	fc_frame_init(fp);
	fr_dev(fp) = qedf->lport;
	fr_sof(fp) = FC_SOF_I3;
	fr_eof(fp) = FC_EOF_T;
	fr_crc(fp) = cpu_to_le32(~crc);

	/*
	 * We need to return the frame back up to libfc in a non-atomic
	 * context
	 */
	io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
	if (!io_work) {
		QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
		    "work for I/O completion.\n");
		fc_frame_free(fp);
		goto increment_prod;
	}
	memset(io_work, 0, sizeof(struct qedf_io_work));

	INIT_WORK(&io_work->work, qedf_fp_io_handler);

	/* Copy contents of CQE for deferred processing */
	memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));

	io_work->qedf = qedf;
	io_work->fp = fp;

	queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work);
increment_prod:
	spin_lock_irqsave(&qedf->hba_lock, flags);

	/* Increment producer to let f/w know we've handled the frame */
	qedf->bdq_prod_idx++;

	/* Producer index wraps at uint16_t boundary */
	if (qedf->bdq_prod_idx == 0xffff)
		qedf->bdq_prod_idx = 0;

	writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
	tmp = readw(qedf->bdq_primary_prod);
	writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
	tmp = readw(qedf->bdq_secondary_prod);

	spin_unlock_irqrestore(&qedf->hba_lock, flags);
}