/*
 * QLogic FCoE Offload Driver
 * Copyright (c) 2016-2018 Cavium Inc.
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "qedf.h"
#include <scsi/scsi_tcq.h>

void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
	unsigned int timer_msec)
{
	queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
	    msecs_to_jiffies(timer_msec));
}

static void qedf_cmd_timeout(struct work_struct *work)
{

	struct qedf_ioreq *io_req =
	    container_of(work, struct qedf_ioreq, timeout_work.work);
	struct qedf_ctx *qedf;
	struct qedf_rport *fcport;
	u8 op = 0;

	if (io_req == NULL) {
		QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
		return;
	}

	fcport = io_req->fcport;
	if (io_req->fcport == NULL) {
		QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
		return;
	}

	qedf = fcport->qedf;

	switch (io_req->cmd_type) {
	case QEDF_ABTS:
		if (qedf == NULL) {
			QEDF_INFO(NULL, QEDF_LOG_IO,
				  "qedf is NULL for ABTS xid=0x%x.\n",
				  io_req->xid);
			return;
		}

		QEDF_ERR((&qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n",
		    io_req->xid);
		/* Cleanup timed out ABTS */
		qedf_initiate_cleanup(io_req, true);
		complete(&io_req->abts_done);

		/*
		 * Need to call kref_put for reference taken when initiate_abts
		 * was called since abts_compl won't be called now that we've
		 * cleaned up the task.
		 */
		kref_put(&io_req->refcount, qedf_release_cmd);

		/* Clear in abort bit now that we're done with the command */
		clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);

		/*
		 * Now that the original I/O and the ABTS are complete see
		 * if we need to reconnect to the target.
		 */
		qedf_restart_rport(fcport);
		break;
	case QEDF_ELS:
		if (!qedf) {
			QEDF_INFO(NULL, QEDF_LOG_IO,
				  "qedf is NULL for ELS xid=0x%x.\n",
				  io_req->xid);
			return;
		}
		/* ELS request no longer outstanding since it timed out */
		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);

		kref_get(&io_req->refcount);
		/*
		 * Don't attempt to clean an ELS timeout as any subsequent
		 * ABTS or cleanup requests just hang.  For now just free
		 * the resources of the original I/O and the RRQ
		 */
		QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
			  io_req->xid);
		io_req->event = QEDF_IOREQ_EV_ELS_TMO;
		/* Call callback function to complete command */
		if (io_req->cb_func && io_req->cb_arg) {
			op = io_req->cb_arg->op;
			io_req->cb_func(io_req->cb_arg);
			io_req->cb_arg = NULL;
		}
		qedf_initiate_cleanup(io_req, true);
		kref_put(&io_req->refcount, qedf_release_cmd);
		break;
	case QEDF_SEQ_CLEANUP:
		QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, "
		    "xid=0x%x.\n", io_req->xid);
		qedf_initiate_cleanup(io_req, true);
		io_req->event = QEDF_IOREQ_EV_ELS_TMO;
		qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
		break;
	default:
		break;
	}
}

void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
{
	struct io_bdt *bdt_info;
	struct qedf_ctx *qedf = cmgr->qedf;
	size_t bd_tbl_sz;
	u16 min_xid = 0;
	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
	int num_ios;
	int i;
	struct qedf_ioreq *io_req;

	num_ios = max_xid - min_xid + 1;

	/* Free fcoe_bdt_ctx structures */
	if (!cmgr->io_bdt_pool)
		goto free_cmd_pool;

	bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		if (bdt_info->bd_tbl) {
			dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz,
			    bdt_info->bd_tbl, bdt_info->bd_tbl_dma);
			bdt_info->bd_tbl = NULL;
		}
	}

	/* Destroy io_bdt pool */
	for (i = 0; i < num_ios; i++) {
		kfree(cmgr->io_bdt_pool[i]);
		cmgr->io_bdt_pool[i] = NULL;
	}

	kfree(cmgr->io_bdt_pool);
	cmgr->io_bdt_pool = NULL;

free_cmd_pool:

	for (i = 0; i < num_ios; i++) {
		io_req = &cmgr->cmds[i];
		kfree(io_req->sgl_task_params);
		kfree(io_req->task_params);
		/* Make sure we free per command sense buffer */
		if (io_req->sense_buffer)
			dma_free_coherent(&qedf->pdev->dev,
			    QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer,
			    io_req->sense_buffer_dma);
		cancel_delayed_work_sync(&io_req->rrq_work);
	}

	/* Free command manager itself */
	vfree(cmgr);
}

static void qedf_handle_rrq(struct work_struct *work)
{
	struct qedf_ioreq *io_req =
	    container_of(work, struct qedf_ioreq, rrq_work.work);

	qedf_send_rrq(io_req);

}

struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
{
	struct qedf_cmd_mgr *cmgr;
	struct io_bdt *bdt_info;
	struct qedf_ioreq *io_req;
	u16 xid;
	int i;
	int num_ios;
	u16 min_xid = 0;
	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);

	/* Make sure num_queues is already set before calling this function */
	if (!qedf->num_queues) {
		QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n");
		return NULL;
	}

	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
		QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and "
			   "max_xid 0x%x.\n", min_xid, max_xid);
		return NULL;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid "
		   "0x%x.\n", min_xid, max_xid);

	num_ios = max_xid - min_xid + 1;

	cmgr = vzalloc(sizeof(struct qedf_cmd_mgr));
	if (!cmgr) {
		QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n");
		return NULL;
	}

	cmgr->qedf = qedf;
	spin_lock_init(&cmgr->lock);
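	/*
	 * Note: each qedf_ioreq below is permanently bound to the xid that
	 * equals its index in cmgr->cmds[], so cmds[], io_bdt_pool[] and the
	 * firmware task memory can all be indexed by the same xid.
	 */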
	/*
	 * Initialize I/O request fields.
	 */
	xid = 0;

	for (i = 0; i < num_ios; i++) {
		io_req = &cmgr->cmds[i];
		INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout);

		io_req->xid = xid++;

		INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq);

		/* Allocate DMA memory to hold sense buffer */
		io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
		    QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
		    GFP_KERNEL);
		if (!io_req->sense_buffer)
			goto mem_err;

		/* Allocate task parameters to pass to f/w init functions */
		io_req->task_params = kzalloc(sizeof(*io_req->task_params),
					      GFP_KERNEL);
		if (!io_req->task_params) {
			QEDF_ERR(&(qedf->dbg_ctx),
				 "Failed to allocate task_params for xid=0x%x\n",
				 i);
			goto mem_err;
		}

		/*
		 * Allocate scatter/gather list info to pass to f/w init
		 * functions.
		 */
		io_req->sgl_task_params = kzalloc(
		    sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
		if (!io_req->sgl_task_params) {
			QEDF_ERR(&(qedf->dbg_ctx),
				 "Failed to allocate sgl_task_params for xid=0x%x\n",
				 i);
			goto mem_err;
		}
	}

	/* Allocate pool of io_bdts - one for each qedf_ioreq */
	cmgr->io_bdt_pool = kmalloc_array(num_ios, sizeof(struct io_bdt *),
	    GFP_KERNEL);

	if (!cmgr->io_bdt_pool) {
		QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n");
		goto mem_err;
	}

	for (i = 0; i < num_ios; i++) {
		cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
		    GFP_KERNEL);
		if (!cmgr->io_bdt_pool[i]) {
			QEDF_WARN(&(qedf->dbg_ctx),
				   "Failed to alloc io_bdt_pool[%d].\n", i);
			goto mem_err;
		}
	}

	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
		    QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
		    &bdt_info->bd_tbl_dma, GFP_KERNEL);
		if (!bdt_info->bd_tbl) {
			QEDF_WARN(&(qedf->dbg_ctx),
				   "Failed to alloc bdt_tbl[%d].\n", i);
			goto mem_err;
		}
	}
	atomic_set(&cmgr->free_list_cnt, num_ios);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
	    "cmgr->free_list_cnt=%d.\n",
	    atomic_read(&cmgr->free_list_cnt));

	return cmgr;

mem_err:
	qedf_cmd_mgr_free(cmgr);
	return NULL;
}

struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
{
	struct qedf_ctx *qedf = fcport->qedf;
	struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr;
	struct qedf_ioreq *io_req = NULL;
	struct io_bdt *bd_tbl;
	u16 xid;
	uint32_t free_sqes;
	int i;
	unsigned long flags;

	free_sqes = atomic_read(&fcport->free_sqes);

	if (!free_sqes) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Returning NULL, free_sqes=%d.\n ",
		    free_sqes);
		goto out_failed;
	}

	/* Limit the number of outstanding R/W tasks */
	if ((atomic_read(&fcport->num_active_ios) >=
	    NUM_RW_TASKS_PER_CONNECTION)) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Returning NULL, num_active_ios=%d.\n",
		    atomic_read(&fcport->num_active_ios));
		goto out_failed;
	}

	/* Limit the number of global TIDs used by certain tasks */
	if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Returning NULL, free_list_cnt=%d.\n",
		    atomic_read(&cmd_mgr->free_list_cnt));
		goto out_failed;
	}

	spin_lock_irqsave(&cmd_mgr->lock, flags);
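	/*
	 * Round-robin scan for a free command slot: resume at the index of
	 * the last allocation, wrap at FCOE_PARAMS_NUM_TASKS, and take the
	 * first entry whose alloc flag is clear.
	 */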
	for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
		io_req = &cmd_mgr->cmds[cmd_mgr->idx];
		cmd_mgr->idx++;
		if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS)
			cmd_mgr->idx = 0;

		/* Check to make sure command was previously freed */
		if (!io_req->alloc)
			break;
	}

	if (i == FCOE_PARAMS_NUM_TASKS) {
		spin_unlock_irqrestore(&cmd_mgr->lock, flags);
		goto out_failed;
	}

	/* Clear any flags now that we've reallocated the xid */
	io_req->flags = 0;
	io_req->alloc = 1;
	spin_unlock_irqrestore(&cmd_mgr->lock, flags);

	atomic_inc(&fcport->num_active_ios);
	atomic_dec(&fcport->free_sqes);
	xid = io_req->xid;
	atomic_dec(&cmd_mgr->free_list_cnt);

	io_req->cmd_mgr = cmd_mgr;
	io_req->fcport = fcport;

	/* Clear any stale sc_cmd back pointer */
	io_req->sc_cmd = NULL;
	io_req->lun = -1;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	if (bd_tbl == NULL) {
		QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid);
		kref_put(&io_req->refcount, qedf_release_cmd);
		goto out_failed;
	}
	bd_tbl->io_req = io_req;
	io_req->cmd_type = cmd_type;
	io_req->tm_flags = 0;

	/* Reset sequence offset data */
	io_req->rx_buf_off = 0;
	io_req->tx_buf_off = 0;
	io_req->rx_id = 0xffff;	/* No RX_ID assigned yet */

	return io_req;

out_failed:
	/* Record failure for stats and return NULL to caller */
	qedf->alloc_failures++;
	return NULL;
}

static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
{
	struct qedf_mp_req *mp_req = &(io_req->mp_req);
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	uint64_t sz = sizeof(struct scsi_sge);

	/* clear tm flags */
	if (mp_req->mp_req_bd) {
		dma_free_coherent(&qedf->pdev->dev, sz,
		    mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
		mp_req->mp_req_bd = NULL;
	}
	if (mp_req->mp_resp_bd) {
		dma_free_coherent(&qedf->pdev->dev, sz,
		    mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma);
		mp_req->mp_resp_bd = NULL;
	}
	if (mp_req->req_buf) {
		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
		    mp_req->req_buf, mp_req->req_buf_dma);
		mp_req->req_buf = NULL;
	}
	if (mp_req->resp_buf) {
		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
		    mp_req->resp_buf, mp_req->resp_buf_dma);
		mp_req->resp_buf = NULL;
	}
}

void qedf_release_cmd(struct kref *ref)
{
	struct qedf_ioreq *io_req =
	    container_of(ref, struct qedf_ioreq, refcount);
	struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
	struct qedf_rport *fcport = io_req->fcport;
	unsigned long flags;

	if (io_req->cmd_type == QEDF_SCSI_CMD)
		WARN_ON(io_req->sc_cmd);

	if (io_req->cmd_type == QEDF_ELS ||
	    io_req->cmd_type == QEDF_TASK_MGMT_CMD)
		qedf_free_mp_resc(io_req);

	atomic_inc(&cmd_mgr->free_list_cnt);
	atomic_dec(&fcport->num_active_ios);
	if (atomic_read(&fcport->num_active_ios) < 0)
		QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");

	/* Increment task retry identifier now that the request is released */
	io_req->task_retry_identifier++;
	io_req->fcport = NULL;

	clear_bit(QEDF_CMD_DIRTY, &io_req->flags);
	io_req->cpu = 0;
	spin_lock_irqsave(&cmd_mgr->lock, flags);
	io_req->fcport = NULL;
	io_req->alloc = 0;
	spin_unlock_irqrestore(&cmd_mgr->lock, flags);
}
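/*
 * Note on SGE types: an SGL is treated as "fast" when the command is a read
 * or has eight or fewer elements; it is downgraded to "slow" only when a
 * larger write contains an intermediate element shorter than QEDF_PAGE_SIZE,
 * since the firmware then has to cope with a short mid-list SGE.
 */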
static int qedf_map_sg(struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct Scsi_Host *host = sc->device->host;
	struct fc_lport *lport = shost_priv(host);
	struct qedf_ctx *qedf = lport_priv(lport);
	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
	struct scatterlist *sg;
	int byte_count = 0;
	int sg_count = 0;
	int bd_count = 0;
	u32 sg_len;
	u64 addr, end_addr;
	int i = 0;

	sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
	    scsi_sg_count(sc), sc->sc_data_direction);
	sg = scsi_sglist(sc);

	io_req->sge_type = QEDF_IOREQ_UNKNOWN_SGE;

	if (sg_count <= 8 || io_req->io_req_flags == QEDF_READ)
		io_req->sge_type = QEDF_IOREQ_FAST_SGE;

	scsi_for_each_sg(sc, sg, sg_count, i) {
		sg_len = (u32)sg_dma_len(sg);
		addr = (u64)sg_dma_address(sg);
		end_addr = (u64)(addr + sg_len);

		/*
		 * Intermediate s/g element, so check whether it is shorter
		 * than a page. Only required for writes and only if the
		 * number of scatter/gather elements is 8 or more.
		 */
		if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE && (i) &&
		    (i != (sg_count - 1)) && sg_len < QEDF_PAGE_SIZE)
			io_req->sge_type = QEDF_IOREQ_SLOW_SGE;

		bd[bd_count].sge_addr.lo = cpu_to_le32(U64_LO(addr));
		bd[bd_count].sge_addr.hi = cpu_to_le32(U64_HI(addr));
		bd[bd_count].sge_len = cpu_to_le32(sg_len);

		bd_count++;
		byte_count += sg_len;
	}

	/* If neither FAST nor SLOW was set above, default to FAST */
	if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE)
		io_req->sge_type = QEDF_IOREQ_FAST_SGE;

	if (byte_count != scsi_bufflen(sc))
		QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
			  "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count,
			  scsi_bufflen(sc), io_req->xid);

	return bd_count;
}

static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
	int bd_count;

	if (scsi_sg_count(sc)) {
		bd_count = qedf_map_sg(io_req);
		if (bd_count == 0)
			return -ENOMEM;
	} else {
		bd_count = 0;
		bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
		bd[0].sge_len = 0;
	}
	io_req->bd_tbl->bd_valid = bd_count;

	return 0;
}

static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
				struct fcp_cmnd *fcp_cmnd)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;

	/* fcp_cmnd is 32 bytes */
	memset(fcp_cmnd, 0, FCP_CMND_LEN);

	/* 8 bytes: SCSI LUN info */
	int_to_scsilun(sc_cmd->device->lun,
			(struct scsi_lun *)&fcp_cmnd->fc_lun);

	/* 4 bytes: flag info */
	fcp_cmnd->fc_pri_ta = 0;
	fcp_cmnd->fc_tm_flags = io_req->tm_flags;
	fcp_cmnd->fc_flags = io_req->io_req_flags;
	fcp_cmnd->fc_cmdref = 0;

	/* Populate data direction */
	if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
		fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
	} else {
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
			fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
		else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
			fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
	}

	fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;

	/* 16 bytes: CDB information */
	if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
		memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);

	/* 4 bytes: FCP data length */
	fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
}
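/*
 * The 32-byte FCP_CMND IU built above is laid out as 8 bytes of LUN,
 * 4 bytes of task attribute/TM/direction flags, 16 bytes of CDB and a
 * 4-byte FCP data length, matching the byte counts noted inline.
 */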
static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
			   struct qedf_ioreq *io_req,
			   struct e4_fcoe_task_context *task_ctx,
			   struct fcoe_wqe *sqe)
{
	enum fcoe_task_type task_type;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct io_bdt *bd_tbl = io_req->bd_tbl;
	u8 fcp_cmnd[32];
	u32 tmp_fcp_cmnd[8];
	int bd_count = 0;
	struct qedf_ctx *qedf = fcport->qedf;
	uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
	struct regpair sense_data_buffer_phys_addr;
	u32 tx_io_size = 0;
	u32 rx_io_size = 0;
	int i, cnt;

	/* Note init_initiator_rw_fcoe_task memsets the task context */
	io_req->task = task_ctx;
	memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
	memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
	memset(io_req->sgl_task_params, 0,
	       sizeof(struct scsi_sgl_task_params));

	/* Set task type based on DMA direction of the command */
	if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
		task_type = FCOE_TASK_TYPE_READ_INITIATOR;
	} else {
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
			tx_io_size = io_req->data_xfer_len;
		} else {
			task_type = FCOE_TASK_TYPE_READ_INITIATOR;
			rx_io_size = io_req->data_xfer_len;
		}
	}

	/* Setup the fields for fcoe_task_params */
	io_req->task_params->context = task_ctx;
	io_req->task_params->sqe = sqe;
	io_req->task_params->task_type = task_type;
	io_req->task_params->tx_io_size = tx_io_size;
	io_req->task_params->rx_io_size = rx_io_size;
	io_req->task_params->conn_cid = fcport->fw_cid;
	io_req->task_params->itid = io_req->xid;
	io_req->task_params->cq_rss_number = cq_idx;
	io_req->task_params->is_tape_device = fcport->dev_type;

	/* Fill in information for scatter/gather list */
	if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
		bd_count = bd_tbl->bd_valid;
		io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
		io_req->sgl_task_params->sgl_phys_addr.lo =
			U64_LO(bd_tbl->bd_tbl_dma);
		io_req->sgl_task_params->sgl_phys_addr.hi =
			U64_HI(bd_tbl->bd_tbl_dma);
		io_req->sgl_task_params->num_sges = bd_count;
		io_req->sgl_task_params->total_buffer_size =
		    scsi_bufflen(io_req->sc_cmd);
		if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
			io_req->sgl_task_params->small_mid_sge = 1;
		else
			io_req->sgl_task_params->small_mid_sge = 0;
	}

	/* Fill in physical address of sense buffer */
	sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
	sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);

	/* fill FCP_CMND IU */
	qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);

	/* Swap fcp_cmnd since FC is big endian */
	cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
	for (i = 0; i < cnt; i++) {
		tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
	}
	memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));

	init_initiator_rw_fcoe_task(io_req->task_params,
				    io_req->sgl_task_params,
				    sense_data_buffer_phys_addr,
				    io_req->task_retry_identifier, fcp_cmnd);

	/* Increment SGL type counters */
	if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
		qedf->slow_sge_ios++;
	else
		qedf->fast_sge_ios++;
}
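/*
 * Middle path (MP) tasks carry ELS and task management traffic. Unlike the
 * regular read/write tasks above, they use the preallocated single-page
 * request/response buffers set up by qedf_init_mp_req() and always complete
 * on CQ 0.
 */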
void qedf_init_mp_task(struct qedf_ioreq *io_req,
	struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
{
	struct qedf_mp_req *mp_req = &(io_req->mp_req);
	struct qedf_rport *fcport = io_req->fcport;
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	struct fc_frame_header *fc_hdr;
	struct fcoe_tx_mid_path_params task_fc_hdr;
	struct scsi_sgl_task_params tx_sgl_task_params;
	struct scsi_sgl_task_params rx_sgl_task_params;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
		  "Initializing MP task for cmd_type=%d\n",
		  io_req->cmd_type);

	qedf->control_requests++;

	memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
	memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
	memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
	memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));

	/* Setup the task from io_req for easy reference */
	io_req->task = task_ctx;

	/* Setup the fields for fcoe_task_params */
	io_req->task_params->context = task_ctx;
	io_req->task_params->sqe = sqe;
	io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
	io_req->task_params->tx_io_size = io_req->data_xfer_len;
	/* rx_io_size tells the f/w how large a response buffer we have */
	io_req->task_params->rx_io_size = PAGE_SIZE;
	io_req->task_params->conn_cid = fcport->fw_cid;
	io_req->task_params->itid = io_req->xid;
	/* Return middle path commands on CQ 0 */
	io_req->task_params->cq_rss_number = 0;
	io_req->task_params->is_tape_device = fcport->dev_type;

	fc_hdr = &(mp_req->req_fc_hdr);
	/* Set OX_ID and RX_ID based on driver task id */
	fc_hdr->fh_ox_id = io_req->xid;
	fc_hdr->fh_rx_id = htons(0xffff);

	/* Set up FC header information */
	task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
	task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
	task_fc_hdr.type = fc_hdr->fh_type;
	task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
	task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
	task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
	task_fc_hdr.ox_id = fc_hdr->fh_ox_id;

	/* Set up s/g list parameters for request buffer */
	tx_sgl_task_params.sgl = mp_req->mp_req_bd;
	tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
	tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
	tx_sgl_task_params.num_sges = 1;
	/* Set PAGE_SIZE for now since sg element is that size ??? */
	tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
	tx_sgl_task_params.small_mid_sge = 0;

	/* Set up s/g list parameters for response buffer */
	rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
	rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
	rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
	rx_sgl_task_params.num_sges = 1;
	/* Set PAGE_SIZE for now since sg element is that size ??? */
	rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
	rx_sgl_task_params.small_mid_sge = 0;


	/*
	 * Last arg is 0 because the previous code did not request that the
	 * FC header information be returned.
	 */
	init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
						     &task_fc_hdr,
						     &tx_sgl_task_params,
						     &rx_sgl_task_params, 0);
}
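/*
 * The send queue keeps two producer counters: sq_prod_idx is the driver's
 * ring index and wraps at the number of SQEs that fit in sq_mem_size, while
 * fw_sq_prod_idx increments without wrapping and is the value written to
 * the doorbell.
 */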
/* Presumed that fcport->rport_lock is held */
u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
{
	uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
	u16 rval;

	rval = fcport->sq_prod_idx;

	/* Adjust ring index */
	fcport->sq_prod_idx++;
	fcport->fw_sq_prod_idx++;
	if (fcport->sq_prod_idx == total_sqe)
		fcport->sq_prod_idx = 0;

	return rval;
}

void qedf_ring_doorbell(struct qedf_rport *fcport)
{
	struct fcoe_db_data dbell = { 0 };

	dbell.agg_flags = 0;

	dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT;
	dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT;
	dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD <<
	    FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;

	dbell.sq_prod = fcport->fw_sq_prod_idx;
	/* wmb makes sure that the BDs data is updated before updating the
	 * producer, otherwise FW may read old data from the BDs.
	 */
	wmb();
	barrier();
	writel(*(u32 *)&dbell, fcport->p_doorbell);
	/*
	 * Fence required to flush the write combined buffer, since another
	 * CPU may write to the same doorbell address and data may be lost
	 * due to relaxed order nature of write combined bar.
	 */
	wmb();
}

static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
			  int8_t direction)
{
	struct qedf_ctx *qedf = fcport->qedf;
	struct qedf_io_log *io_log;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	unsigned long flags;
	uint8_t op;

	spin_lock_irqsave(&qedf->io_trace_lock, flags);

	io_log = &qedf->io_trace_buf[qedf->io_trace_idx];
	io_log->direction = direction;
	io_log->task_id = io_req->xid;
	io_log->port_id = fcport->rdata->ids.port_id;
	io_log->lun = sc_cmd->device->lun;
	io_log->op = op = sc_cmd->cmnd[0];
	io_log->lba[0] = sc_cmd->cmnd[2];
	io_log->lba[1] = sc_cmd->cmnd[3];
	io_log->lba[2] = sc_cmd->cmnd[4];
	io_log->lba[3] = sc_cmd->cmnd[5];
	io_log->bufflen = scsi_bufflen(sc_cmd);
	io_log->sg_count = scsi_sg_count(sc_cmd);
	io_log->result = sc_cmd->result;
	io_log->jiffies = jiffies;
	io_log->refcount = kref_read(&io_req->refcount);

	if (direction == QEDF_IO_TRACE_REQ) {
		/* For requests we only care about the submission CPU */
		io_log->req_cpu = io_req->cpu;
		io_log->int_cpu = 0;
		io_log->rsp_cpu = 0;
	} else if (direction == QEDF_IO_TRACE_RSP) {
		io_log->req_cpu = io_req->cpu;
		io_log->int_cpu = io_req->int_cpu;
		io_log->rsp_cpu = smp_processor_id();
	}

	io_log->sge_type = io_req->sge_type;

	qedf->io_trace_idx++;
	if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE)
		qedf->io_trace_idx = 0;

	spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
}
int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct Scsi_Host *host = sc_cmd->device->host;
	struct fc_lport *lport = shost_priv(host);
	struct qedf_ctx *qedf = lport_priv(lport);
	struct e4_fcoe_task_context *task_ctx;
	u16 xid;
	enum fcoe_task_type req_type = 0;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	/* Initialize rest of io_req fields */
	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
	sc_cmd->SCp.ptr = (char *)io_req;
	io_req->sge_type = QEDF_IOREQ_FAST_SGE; /* Assume fast SGL by default */

	/* Record which cpu this request is associated with */
	io_req->cpu = smp_processor_id();

	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		req_type = FCOE_TASK_TYPE_READ_INITIATOR;
		io_req->io_req_flags = QEDF_READ;
		qedf->input_requests++;
	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
		req_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
		io_req->io_req_flags = QEDF_WRITE;
		qedf->output_requests++;
	} else {
		io_req->io_req_flags = 0;
		qedf->control_requests++;
	}

	xid = io_req->xid;

	/* Build buffer descriptor list for firmware from sg list */
	if (qedf_build_bd_list_from_sg(io_req)) {
		QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
		/* Release cmd will release io_req, but sc_cmd is assigned */
		io_req->sc_cmd = NULL;
		kref_put(&io_req->refcount, qedf_release_cmd);
		return -EAGAIN;
	}

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
	    test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
		/* Release cmd will release io_req, but sc_cmd is assigned */
		io_req->sc_cmd = NULL;
		kref_put(&io_req->refcount, qedf_release_cmd);
		return -EINVAL;
	}

	/* Record LUN number for later use if we need it */
	io_req->lun = (int)sc_cmd->device->lun;

	/* Obtain free SQE */
	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	/* Get the task context */
	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
	if (!task_ctx) {
		QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
			   xid);
		/* Release cmd will release io_req, but sc_cmd is assigned */
		io_req->sc_cmd = NULL;
		kref_put(&io_req->refcount, qedf_release_cmd);
		return -EINVAL;
	}

	qedf_init_task(fcport, lport, io_req, task_ctx, sqe);

	/* Ring doorbell */
	qedf_ring_doorbell(fcport);

	/* Set that command is with the firmware now */
	set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);

	if (qedf_io_tracing && io_req->sc_cmd)
		qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);

	return 0;
}
int
qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
{
	struct fc_lport *lport = shost_priv(host);
	struct qedf_ctx *qedf = lport_priv(lport);
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct qedf_rport *fcport;
	struct qedf_ioreq *io_req;
	int rc = 0;
	int rval;
	unsigned long flags = 0;
	int num_sgs = 0;

	num_sgs = scsi_sg_count(sc_cmd);
	if (scsi_sg_count(sc_cmd) > QEDF_MAX_BDS_PER_CMD) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Number of SG elements %d exceeds the hardware limit of %d.\n",
			 num_sgs, QEDF_MAX_BDS_PER_CMD);
		sc_cmd->result = DID_ERROR << 16;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
		sc_cmd->result = DID_NO_CONNECT << 16;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	if (!qedf->pdev->msix_enabled) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Completing sc_cmd=%p DID_NO_CONNECT as MSI-X is not enabled.\n",
		    sc_cmd);
		sc_cmd->result = DID_NO_CONNECT << 16;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		sc_cmd->result = rval;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	/* Retry command if we are doing a qed drain operation */
	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	if (lport->state != LPORT_ST_READY ||
	    atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	/* rport and tgt are allocated together, so tgt should be non-NULL */
	fcport = (struct qedf_rport *)&rp[1];

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		/*
		 * Session is not offloaded yet. Let SCSI-ml retry
		 * the command.
		 */
		rc = SCSI_MLQUEUE_TARGET_BUSY;
		goto exit_qcmd;
	}

	atomic_inc(&fcport->ios_to_queue);

	if (fcport->retry_delay_timestamp) {
		if (time_after(jiffies, fcport->retry_delay_timestamp)) {
			fcport->retry_delay_timestamp = 0;
		} else {
			/* If retry_delay timer is active, flow off the ML */
			rc = SCSI_MLQUEUE_TARGET_BUSY;
			atomic_dec(&fcport->ios_to_queue);
			goto exit_qcmd;
		}
	}

	io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
	if (!io_req) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		atomic_dec(&fcport->ios_to_queue);
		goto exit_qcmd;
	}

	io_req->sc_cmd = sc_cmd;

	/* Take fcport->rport_lock for posting to fcport send queue */
	spin_lock_irqsave(&fcport->rport_lock, flags);
	if (qedf_post_io_req(fcport, io_req)) {
		QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
		/* Return SQE to pool */
		atomic_inc(&fcport->free_sqes);
		rc = SCSI_MLQUEUE_HOST_BUSY;
	}
	spin_unlock_irqrestore(&fcport->rport_lock, flags);
	atomic_dec(&fcport->ios_to_queue);

exit_qcmd:
	return rc;
}
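/*
 * Parse the FCP_RSP payload the firmware placed in the per-command sense
 * buffer. When FCP_RSP_LEN_VALID is set, the response info bytes come
 * first and the sense data follows at offset fcp_rsp_len.
 */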
static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
			       struct fcoe_cqe_rsp_info *fcp_rsp)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	u8 rsp_flags = fcp_rsp->rsp_flags.flags;
	int fcp_sns_len = 0;
	int fcp_rsp_len = 0;
	uint8_t *rsp_info, *sense_data;

	io_req->fcp_status = FC_GOOD;
	io_req->fcp_resid = 0;
	if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
	    FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
		io_req->fcp_resid = fcp_rsp->fcp_resid;

	io_req->scsi_comp_flags = rsp_flags;
	CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
	    fcp_rsp->scsi_status_code;

	if (rsp_flags &
	    FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID)
		fcp_rsp_len = fcp_rsp->fcp_rsp_len;

	if (rsp_flags &
	    FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID)
		fcp_sns_len = fcp_rsp->fcp_sns_len;

	io_req->fcp_rsp_len = fcp_rsp_len;
	io_req->fcp_sns_len = fcp_sns_len;
	rsp_info = sense_data = io_req->sense_buffer;

	/* fetch fcp_rsp_code */
	if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
		/* Only for task management function */
		io_req->fcp_rsp_code = rsp_info[3];
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "fcp_rsp_code = %d\n", io_req->fcp_rsp_code);
		/* Adjust sense-data location. */
		sense_data += fcp_rsp_len;
	}

	if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Truncating sense buffer\n");
		fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
	}

	/* The sense buffer can be NULL for TMF commands */
	if (sc_cmd->sense_buffer) {
		memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (fcp_sns_len)
			memcpy(sc_cmd->sense_buffer, sense_data,
			    fcp_sns_len);
	}
}

static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;

	if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
		dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc),
		    scsi_sg_count(sc), sc->sc_data_direction);
		io_req->bd_tbl->bd_valid = 0;
	}
}

void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	u16 xid, rval;
	struct e4_fcoe_task_context *task_ctx;
	struct scsi_cmnd *sc_cmd;
	struct fcoe_cqe_rsp_info *fcp_rsp;
	struct qedf_rport *fcport;
	int refcount;
	u16 scope, qualifier = 0;
	u8 fw_residual_flag = 0;

	if (!io_req)
		return;
	if (!cqe)
		return;

	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
	    test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "io_req xid=0x%x already in cleanup or abort processing or already completed.\n",
			 io_req->xid);
		return;
	}

	xid = io_req->xid;
	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
	sc_cmd = io_req->sc_cmd;
	fcp_rsp = &cqe->cqe_info.rsp_info;

	if (!sc_cmd) {
		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
		return;
	}

	if (!sc_cmd->SCp.ptr) {
		QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
		    "another context.\n");
		return;
	}

	if (!sc_cmd->device) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Device for sc_cmd %p is NULL.\n", sc_cmd);
		return;
	}

	if (!sc_cmd->request) {
		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
		    "sc_cmd=%p.\n", sc_cmd);
		return;
	}

	if (!sc_cmd->request->q) {
		QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
		   "is not valid, sc_cmd=%p.\n", sc_cmd);
		return;
	}

	fcport = io_req->fcport;

	/*
	 * When flush is active, let the cmds be completed from the cleanup
	 * context
	 */
	if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
	    (test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags) &&
	     sc_cmd->device->lun == (u64)fcport->lun_reset_lun)) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
			  "Dropping good completion xid=0x%x as fcport is flushing",
			  io_req->xid);
		return;
	}

	qedf_parse_fcp_rsp(io_req, fcp_rsp);

	qedf_unmap_sg_list(qedf, io_req);

	/* Check for FCP transport error */
	if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d "
		    "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len,
		    io_req->fcp_rsp_code);
		sc_cmd->result = DID_BUS_BUSY << 16;
		goto out;
	}

	fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
	    FCOE_CQE_RSP_INFO_FW_UNDERRUN);
	if (fw_residual_flag) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x "
		    "fcp_resid=%d fw_residual=0x%x.\n", io_req->xid,
		    fcp_rsp->rsp_flags.flags, io_req->fcp_resid,
		    cqe->cqe_info.rsp_info.fw_residual);

		if (io_req->cdb_status == 0)
			sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
		else
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;

		/* Abort the command since we did not get all the data */
		init_completion(&io_req->abts_done);
		rval = qedf_initiate_abts(io_req, true);
		if (rval) {
			QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
			sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
		}

		/*
		 * Set resid to the whole buffer length so we won't try to
		 * reuse any previously read data.
		 */
		scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
		goto out;
	}

	switch (io_req->fcp_status) {
	case FC_GOOD:
		if (io_req->cdb_status == 0) {
			/* Good I/O completion */
			sc_cmd->result = DID_OK << 16;
		} else {
			refcount = kref_read(&io_req->refcount);
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
			    "%d:0:%d:%lld xid=0x%0x op=0x%02x "
			    "lba=%02x%02x%02x%02x cdb_status=%d "
			    "fcp_resid=0x%x refcount=%d.\n",
			    qedf->lport->host->host_no, sc_cmd->device->id,
			    sc_cmd->device->lun, io_req->xid,
			    sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3],
			    sc_cmd->cmnd[4], sc_cmd->cmnd[5],
			    io_req->cdb_status, io_req->fcp_resid,
			    refcount);
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;

			if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
			    io_req->cdb_status == SAM_STAT_BUSY) {
				/*
				 * Check whether we need to set retry_delay at
				 * all based on retry_delay module parameter
				 * and the status qualifier.
				 */
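				/*
				 * The qualifier is in units of 100 ms, so a
				 * (hypothetical) qualifier of 20 flows off
				 * the queue for 20 * HZ / 10 = 2 * HZ
				 * jiffies, i.e. two seconds.
				 */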

				/* Upper 2 bits */
				scope = fcp_rsp->retry_delay_timer & 0xC000;
				/* Lower 14 bits */
				qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;

				if (qedf_retry_delay &&
				    scope > 0 && qualifier > 0 &&
				    qualifier <= 0x3FEF) {
					/* Check we don't go over the max */
					if (qualifier > QEDF_RETRY_DELAY_MAX)
						qualifier =
						    QEDF_RETRY_DELAY_MAX;
					fcport->retry_delay_timestamp =
					    jiffies + (qualifier * HZ / 10);
				}
				/* Record stats */
				if (io_req->cdb_status ==
				    SAM_STAT_TASK_SET_FULL)
					qedf->task_set_fulls++;
				else
					qedf->busy++;
			}
		}
		if (io_req->fcp_resid)
			scsi_set_resid(sc_cmd, io_req->fcp_resid);
		break;
	default:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
			   io_req->fcp_status);
		break;
	}

out:
	if (qedf_io_tracing)
		qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);

	/*
	 * We wait till the end of the function to clear the
	 * outstanding bit in case we need to send an abort
	 */
	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);

	io_req->sc_cmd = NULL;
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	kref_put(&io_req->refcount, qedf_release_cmd);
}

/* Return a SCSI command in some other context besides a normal completion */
void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
	int result)
{
	u16 xid;
	struct scsi_cmnd *sc_cmd;
	int refcount;

	if (!io_req)
		return;

	if (test_and_set_bit(QEDF_CMD_ERR_SCSI_DONE, &io_req->flags)) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
			  "io_req:%p scsi_done handling already done\n",
			  io_req);
		return;
	}

	/*
	 * We will be done with this command after this call so clear the
	 * outstanding bit.
	 */
	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);

	xid = io_req->xid;
	sc_cmd = io_req->sc_cmd;

	if (!sc_cmd) {
		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
		return;
	}

	if (!virt_addr_valid(sc_cmd)) {
		QEDF_ERR(&qedf->dbg_ctx, "sc_cmd=%p is not valid.", sc_cmd);
		goto bad_scsi_ptr;
	}

	if (!sc_cmd->SCp.ptr) {
		QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
		    "another context.\n");
		return;
	}

	if (!sc_cmd->device) {
		QEDF_ERR(&qedf->dbg_ctx, "Device for sc_cmd %p is NULL.\n",
			 sc_cmd);
		goto bad_scsi_ptr;
	}

	if (!virt_addr_valid(sc_cmd->device)) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Device pointer for sc_cmd %p is bad.\n", sc_cmd);
		goto bad_scsi_ptr;
	}

	if (!sc_cmd->sense_buffer) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "sc_cmd->sense_buffer for sc_cmd %p is NULL.\n",
			 sc_cmd);
		goto bad_scsi_ptr;
	}

	if (!virt_addr_valid(sc_cmd->sense_buffer)) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "sc_cmd->sense_buffer for sc_cmd %p is bad.\n",
			 sc_cmd);
		goto bad_scsi_ptr;
	}

	if (!sc_cmd->scsi_done) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "sc_cmd->scsi_done for sc_cmd %p is NULL.\n",
			 sc_cmd);
		goto bad_scsi_ptr;
	}

	qedf_unmap_sg_list(qedf, io_req);

	sc_cmd->result = result << 16;
	refcount = kref_read(&io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
	    "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
	    "allowed=%d retries=%d refcount=%d.\n",
	    qedf->lport->host->host_no, sc_cmd->device->id,
	    sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0],
	    sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4],
	    sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries,
	    refcount);

	/*
	 * Set resid to the whole buffer length so we won't try to reuse any
	 * previously read data
	 */
	scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));

	if (qedf_io_tracing)
		qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);

	io_req->sc_cmd = NULL;
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	kref_put(&io_req->refcount, qedf_release_cmd);
	return;

bad_scsi_ptr:
	/*
	 * Clear the io_req->sc_cmd backpointer so we don't try to process
	 * this again
	 */
	io_req->sc_cmd = NULL;
	kref_put(&io_req->refcount, qedf_release_cmd);
}

/*
 * Handle warning type CQE completions. This is mainly used for REC timer
 * popping.
 */
void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	int rval, i;
	struct qedf_rport *fcport = io_req->fcport;
	u64 err_warn_bit_map;
	u8 err_warn = 0xff;

	if (!cqe)
		return;

	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
		  "xid=0x%x\n", io_req->xid);
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
		  "err_warn_bitmap=%08x:%08x\n",
		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
		  "rx_buff_off=%08x, rx_id=%04x\n",
		  le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
		  le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
		  le32_to_cpu(cqe->cqe_info.err_info.rx_id));

	/* Normalize the error bitmap value to just an unsigned int */
	err_warn_bit_map = (u64)
	    ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) |
	    (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo;
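	/*
	 * Scan from bit 0 upwards and report the lowest warning bit that is
	 * set; err_warn stays at 0xff if the bitmap is empty.
	 */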
	for (i = 0; i < 64; i++) {
		if (err_warn_bit_map & (u64)((u64)1 << i)) {
			err_warn = i;
			break;
		}
	}

	/* Check if REC TOV expired if this is a tape device */
	if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
		if (err_warn ==
		    FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) {
			QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n");
			if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) {
				io_req->rx_buf_off =
				    cqe->cqe_info.err_info.rx_buf_off;
				io_req->tx_buf_off =
				    cqe->cqe_info.err_info.tx_buf_off;
				io_req->rx_id = cqe->cqe_info.err_info.rx_id;
				rval = qedf_send_rec(io_req);
				/*
				 * We only want to abort the io_req if we
				 * can't queue the REC command as we want to
				 * keep the exchange open for recovery.
				 */
				if (rval)
					goto send_abort;
			}
			return;
		}
	}

send_abort:
	init_completion(&io_req->abts_done);
	rval = qedf_initiate_abts(io_req, true);
	if (rval)
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
}

/* Cleanup a command when we receive an error detection completion */
void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	int rval;

	if (!cqe)
		return;

	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
		  "xid=0x%x\n", io_req->xid);
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
		  "err_warn_bitmap=%08x:%08x\n",
		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
		  "rx_buff_off=%08x, rx_id=%04x\n",
		  le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
		  le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
		  le32_to_cpu(cqe->cqe_info.err_info.rx_id));

	if (qedf->stop_io_on_error) {
		qedf_stop_all_io(qedf);
		return;
	}

	init_completion(&io_req->abts_done);
	rval = qedf_initiate_abts(io_req, true);
	if (rval)
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
}

static void qedf_flush_els_req(struct qedf_ctx *qedf,
	struct qedf_ioreq *els_req)
{
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
	    "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid,
	    kref_read(&els_req->refcount));

	/*
	 * Need to distinguish this from a timeout when calling the
	 * els_req->cb_func.
	 */
	els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;

	/* Cancel the timer */
	cancel_delayed_work_sync(&els_req->timeout_work);

	/* Call callback function to complete command */
	if (els_req->cb_func && els_req->cb_arg) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	/* Release kref for original initiate_els */
	kref_put(&els_req->refcount, qedf_release_cmd);
}

/* A value of -1 for lun is a wild card that means flush all
 * active SCSI I/Os for the target.
 */
void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
{
	struct qedf_ioreq *io_req;
	struct qedf_ctx *qedf;
	struct qedf_cmd_mgr *cmd_mgr;
	int i, rc;
	unsigned long flags;
	int flush_cnt = 0;
	int wait_cnt = 100;
	int refcount = 0;

	if (!fcport)
		return;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return;
	}

	qedf = fcport->qedf;

	if (!qedf) {
		QEDF_ERR(NULL, "qedf is NULL.\n");
		return;
	}

	/* Only wait for all commands to be queued in the Upload context */
	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
	    (lun == -1)) {
		while (atomic_read(&fcport->ios_to_queue)) {
			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
				  "Waiting for %d I/Os to be queued\n",
				  atomic_read(&fcport->ios_to_queue));
			if (wait_cnt == 0) {
				QEDF_ERR(NULL,
					 "%d I/O requests could not be queued\n",
					 atomic_read(&fcport->ios_to_queue));
			}
			msleep(20);
			wait_cnt--;
		}
	}

	cmd_mgr = qedf->cmd_mgr;

	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
		  "Flush active i/o's num=0x%x fcport=0x%p port_id=0x%06x scsi_id=%d.\n",
		  atomic_read(&fcport->num_active_ios), fcport,
		  fcport->rdata->ids.port_id, fcport->rport->scsi_target_id);
	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Locking flush mutex.\n");

	mutex_lock(&qedf->flush_mutex);
	if (lun == -1) {
		set_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
	} else {
		set_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
		fcport->lun_reset_lun = lun;
	}

	for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
		io_req = &cmd_mgr->cmds[i];

		if (!io_req)
			continue;
		if (!io_req->fcport)
			continue;

		spin_lock_irqsave(&cmd_mgr->lock, flags);

		if (io_req->alloc) {
			if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
				if (io_req->cmd_type == QEDF_SCSI_CMD)
					QEDF_ERR(&qedf->dbg_ctx,
						 "Allocated but not queued, xid=0x%x\n",
						 io_req->xid);
			}
			spin_unlock_irqrestore(&cmd_mgr->lock, flags);
		} else {
			spin_unlock_irqrestore(&cmd_mgr->lock, flags);
			continue;
		}

		if (io_req->fcport != fcport)
			continue;

		/* In case of ABTS, CMD_OUTSTANDING is cleared on ABTS response,
		 * but RRQ is still pending.
		 * Workaround: Within qedf_send_rrq, we check if the fcport is
		 * NULL, and we drop the ref on the io_req to clean it up.
		 */
		if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
			refcount = kref_read(&io_req->refcount);
			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
				  "Not outstanding, xid=0x%x, cmd_type=%d refcount=%d.\n",
				  io_req->xid, io_req->cmd_type, refcount);
			continue;
		}

		/* Only consider flushing ELS during target reset */
		if (io_req->cmd_type == QEDF_ELS &&
		    lun == -1) {
			rc = kref_get_unless_zero(&io_req->refcount);
			if (!rc) {
				QEDF_ERR(&(qedf->dbg_ctx),
				    "Could not get kref for ELS io_req=0x%p xid=0x%x.\n",
				    io_req, io_req->xid);
				continue;
			}
			flush_cnt++;
			qedf_flush_els_req(qedf, io_req);
			/*
			 * Release the kref and go back to the top of the
			 * loop.
			 */
			goto free_cmd;
		}

		if (io_req->cmd_type == QEDF_ABTS) {
			rc = kref_get_unless_zero(&io_req->refcount);
			if (!rc) {
				QEDF_ERR(&(qedf->dbg_ctx),
				    "Could not get kref for abort io_req=0x%p xid=0x%x.\n",
				    io_req, io_req->xid);
				continue;
			}
			if (lun != -1 && io_req->lun != lun)
				goto free_cmd;

			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
				  "Flushing abort xid=0x%x.\n", io_req->xid);

			if (cancel_delayed_work_sync(&io_req->rrq_work)) {
				QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
					  "Putting reference for pending RRQ work xid=0x%x.\n",
					  io_req->xid);
				kref_put(&io_req->refcount, qedf_release_cmd);
			}

			/* Cancel any timeout work */
			cancel_delayed_work_sync(&io_req->timeout_work);

			if (!test_bit(QEDF_CMD_IN_ABORT, &io_req->flags))
				goto free_cmd;

			qedf_initiate_cleanup(io_req, true);
			flush_cnt++;

			/* Notify eh_abort handler that ABTS is complete */
			kref_put(&io_req->refcount, qedf_release_cmd);
			complete(&io_req->abts_done);

			goto free_cmd;
		}

		if (!io_req->sc_cmd)
			continue;
		if (!io_req->sc_cmd->device) {
			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
				  "Device backpointer NULL for sc_cmd=%p.\n",
				  io_req->sc_cmd);
			/* Put reference for non-existent scsi_cmnd */
			io_req->sc_cmd = NULL;
			qedf_initiate_cleanup(io_req, false);
			kref_put(&io_req->refcount, qedf_release_cmd);
			continue;
		}
		if (lun > -1) {
			if (io_req->lun != lun)
				continue;
		}

		/*
		 * Use kref_get_unless_zero in the unlikely case the command
		 * we're about to flush was completed in the normal SCSI path
		 */
		rc = kref_get_unless_zero(&io_req->refcount);
		if (!rc) {
			QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for "
			    "io_req=0x%p xid=0x%x\n", io_req, io_req->xid);
			continue;
		}

		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Cleanup xid=0x%x.\n", io_req->xid);
		flush_cnt++;

		/* Cleanup task and return I/O mid-layer */
		qedf_initiate_cleanup(io_req, true);

free_cmd:
		kref_put(&io_req->refcount, qedf_release_cmd);
	}

	wait_cnt = 60;
	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
		  "Flushed 0x%x I/Os, active=0x%x.\n",
		  flush_cnt, atomic_read(&fcport->num_active_ios));
	/* Only wait for all commands to complete in the Upload context */
	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
	    (lun == -1)) {
		while (atomic_read(&fcport->num_active_ios)) {
			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
				  "Flushed 0x%x I/Os, active=0x%x cnt=%d.\n",
				  flush_cnt,
				  atomic_read(&fcport->num_active_ios),
				  wait_cnt);
			if (wait_cnt == 0) {
				QEDF_ERR(&qedf->dbg_ctx,
					 "Flushed %d I/Os, active=%d.\n",
					 flush_cnt,
					 atomic_read(&fcport->num_active_ios));
				for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
					io_req = &cmd_mgr->cmds[i];
					if (io_req->fcport &&
					    io_req->fcport == fcport) {
						refcount =
						    kref_read(&io_req->refcount);
						QEDF_ERR(&qedf->dbg_ctx,
							 "Outstanding io_req =%p xid=0x%x flags=0x%lx, sc_cmd=%p refcount=%d cmd_type=%d.\n",
							 io_req, io_req->xid,
							 io_req->flags,
							 io_req->sc_cmd,
							 refcount,
							 io_req->cmd_type);
					}
				}
				WARN_ON(1);
				break;
			}
			msleep(500);
			wait_cnt--;
		}
	}

	clear_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
	clear_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Unlocking flush mutex.\n");
	mutex_unlock(&qedf->flush_mutex);
}

/*
 * Initiate an ABTS middle path command. Note that we don't have to initialize
 * the task context for an ABTS task.
 */
int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
{
	struct fc_lport *lport;
	struct qedf_rport *fcport = io_req->fcport;
	struct fc_rport_priv *rdata;
	struct qedf_ctx *qedf;
	u16 xid;
	u32 r_a_tov = 0;
	int rc = 0;
	unsigned long flags;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;
	int refcount = 0;

	/* Sanity check qedf_rport before dereferencing any pointers */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "tgt not offloaded\n");
		rc = 1;
		goto out;
	}

	/* Assign qedf before the rdata check so the error print is safe */
	qedf = fcport->qedf;
	rdata = fcport->rdata;

	if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
		QEDF_ERR(&qedf->dbg_ctx, "stale rport\n");
		rc = 1;
		goto out;
	}

	r_a_tov = rdata->r_a_tov;
	lport = qedf->lport;

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
		rc = 1;
		goto drop_rdata_kref;
	}

	if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
		QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
		rc = 1;
		goto drop_rdata_kref;
	}

	/* Ensure room on SQ */
	if (!atomic_read(&fcport->free_sqes)) {
		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
		rc = 1;
		goto drop_rdata_kref;
	}

	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n");
		rc = 1;
		goto drop_rdata_kref;
	}

	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
	    test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
			  "cleanup or abort processing or already "
			  "completed.\n", io_req->xid);
		rc = 1;
		goto drop_rdata_kref;
	}

	kref_get(&io_req->refcount);

	xid = io_req->xid;
	qedf->control_requests++;
	qedf->packet_aborts++;

	/* Set the command type to abort */
	io_req->cmd_type = QEDF_ABTS;
	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;

	set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
	refcount = kref_read(&io_req->refcount);
	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
		  "ABTS io_req xid = 0x%x refcount=%d\n",
		  xid, refcount);

	qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	io_req->task_params->sqe = sqe;

	init_initiator_abort_fcoe_task(io_req->task_params);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);

drop_rdata_kref:
	/* Drop the rdata reference taken above */
	kref_put(&rdata->kref, fc_rport_destroy);
out:
	return rc;
}
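/*
 * Handle the firmware completion of an earlier ABTS request. A BA_ACC
 * response schedules the RRQ work after R_A_TOV so the exchange id is only
 * reused once the target has had time to discard the exchange; for BA_RJT
 * and other responses the cleanup path returns the command.
 */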
void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	uint32_t r_ctl;
	uint16_t xid;
	int rc;
	struct qedf_rport *fcport = io_req->fcport;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
		"0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);

	xid = io_req->xid;
	r_ctl = cqe->cqe_info.abts_info.r_ctl;

	/* This was added at a point when we were scheduling abts_compl &
	 * cleanup_compl on different CPUs and there was a possibility of
	 * the io_req to be freed from the other context before we got here.
	 */
	if (!fcport) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
			"Dropping ABTS completion xid=0x%x as fcport is NULL.\n",
			io_req->xid);
		return;
	}

	/*
	 * When flush is active, let the cmds be completed from the cleanup
	 * context
	 */
	if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
	    test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
			"Dropping ABTS completion xid=0x%x as fcport is flushing.\n",
			io_req->xid);
		return;
	}

	if (!cancel_delayed_work(&io_req->timeout_work)) {
		QEDF_ERR(&qedf->dbg_ctx,
			"Wasn't able to cancel abts timeout work.\n");
	}

	switch (r_ctl) {
	case FC_RCTL_BA_ACC:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
		    "ABTS response - ACC Send RRQ after R_A_TOV\n");
		io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
		rc = kref_get_unless_zero(&io_req->refcount);
		if (!rc) {
			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
				"kref is already zero so ABTS was already completed or flushed xid=0x%x.\n",
				io_req->xid);
			return;
		}
		/*
		 * Don't release this cmd yet. It will be released
		 * after we get the RRQ response.
		 */
		queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
		    msecs_to_jiffies(qedf->lport->r_a_tov));
		break;
	/* For error cases let the cleanup return the command */
	case FC_RCTL_BA_RJT:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
		    "ABTS response - RJT\n");
		io_req->event = QEDF_IOREQ_EV_ABORT_FAILED;
		break;
	default:
		QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n");
		break;
	}

	clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);

	if (io_req->sc_cmd) {
		if (io_req->return_scsi_cmd_on_abts)
			qedf_scsi_done(qedf, io_req, DID_ERROR);
	}

	/* Notify eh_abort handler that ABTS is complete */
	complete(&io_req->abts_done);

	kref_put(&io_req->refcount, qedf_release_cmd);
}

int qedf_init_mp_req(struct qedf_ioreq *io_req)
{
	struct qedf_mp_req *mp_req;
	struct scsi_sge *mp_req_bd;
	struct scsi_sge *mp_resp_bd;
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	dma_addr_t addr;
	uint64_t sz;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n");

	mp_req = (struct qedf_mp_req *)&(io_req->mp_req);
	memset(mp_req, 0, sizeof(struct qedf_mp_req));

	if (io_req->cmd_type != QEDF_ELS) {
		mp_req->req_len = sizeof(struct fcp_cmnd);
		io_req->data_xfer_len = mp_req->req_len;
	} else
		mp_req->req_len = io_req->data_xfer_len;

	mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
	    &mp_req->req_buf_dma, GFP_KERNEL);
	if (!mp_req->req_buf) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev,
	    QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL);
	if (!mp_req->resp_buf) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp "
			  "buffer\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	/* Allocate and map mp_req_bd and mp_resp_bd */
	sz = sizeof(struct scsi_sge);
	mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
	    &mp_req->mp_req_bd_dma, GFP_KERNEL);
	if (!mp_req->mp_req_bd) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
	    &mp_req->mp_resp_bd_dma, GFP_KERNEL);
	if (!mp_req->mp_resp_bd) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	/* Fill bd table */
	addr = mp_req->req_buf_dma;
	mp_req_bd = mp_req->mp_req_bd;
	mp_req_bd->sge_addr.lo = U64_LO(addr);
	mp_req_bd->sge_addr.hi = U64_HI(addr);
	mp_req_bd->sge_len = QEDF_PAGE_SIZE;

	/*
	 * MP buffer is either a task mgmt command or an ELS.
	 * So the assumption is that it consumes a single bd
	 * entry in the bd table
	 */
	mp_resp_bd = mp_req->mp_resp_bd;
	addr = mp_req->resp_buf_dma;
	mp_resp_bd->sge_addr.lo = U64_LO(addr);
	mp_resp_bd->sge_addr.hi = U64_HI(addr);
	mp_resp_bd->sge_len = QEDF_PAGE_SIZE;

	return 0;
}
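
/*
 * Worked example (illustrative): U64_LO()/U64_HI() split a 64-bit DMA
 * address into the two 32-bit halves the firmware SGE expects, so for
 * addr == 0x0000000123456789ULL the BD-table fill above produces:
 *
 *	sge->sge_addr.lo = U64_LO(addr);	lo == 0x23456789
 *	sge->sge_addr.hi = U64_HI(addr);	hi == 0x00000001
 */
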
/*
 * Last ditch effort to clear the port if it's stuck. Used only after a
 * cleanup task times out.
 */
static void qedf_drain_request(struct qedf_ctx *qedf)
{
	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n");
		return;
	}

	/* Set bit to return all queuecommand requests as busy */
	set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);

	/* Call qed drain request for function. Should be synchronous */
	qed_ops->common->drain(qedf->cdev);

	/* Settle time for CQEs to be returned */
	msleep(100);

	/* Unplug and continue */
	clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
}
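
/*
 * Sketch (assumption, not from this file): while QEDF_DRAIN_ACTIVE is set,
 * the queuecommand path is expected to bounce new requests back to the SCSI
 * midlayer until the drain completes, along the lines of:
 *
 *	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags))
 *		return SCSI_MLQUEUE_HOST_BUSY;
 */
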
/*
 * Returns SUCCESS if the cleanup task does not timeout, otherwise return
 * FAILED.
 */
int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
	bool return_scsi_cmd_on_abts)
{
	struct qedf_rport *fcport;
	struct qedf_ctx *qedf;
	uint16_t xid;
	struct e4_fcoe_task_context *task;
	int tmo = 0;
	int rc = SUCCESS;
	unsigned long flags;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;
	int refcount = 0;

	fcport = io_req->fcport;
	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL.\n");
		return SUCCESS;
	}

	/* Sanity check qedf_rport before dereferencing any pointers */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "tgt not offloaded\n");
		return SUCCESS;
	}

	qedf = fcport->qedf;
	if (!qedf) {
		QEDF_ERR(NULL, "qedf is NULL.\n");
		return SUCCESS;
	}

	/* test_and_set_bit takes the IN_CLEANUP flag atomically */
	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
	    test_and_set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
			  "cleanup processing or already completed.\n",
			  io_req->xid);
		return SUCCESS;
	}

	/* Ensure room on SQ */
	if (!atomic_read(&fcport->free_sqes)) {
		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
		/* Need to make sure we clear the flag since it was set */
		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
		return FAILED;
	}

	if (io_req->cmd_type == QEDF_CLEANUP) {
		QEDF_ERR(&qedf->dbg_ctx,
			"io_req=0x%x is already a cleanup command cmd_type=%d.\n",
			io_req->xid, io_req->cmd_type);
		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
		return SUCCESS;
	}

	refcount = kref_read(&io_req->refcount);

	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
		  "Entered xid=0x%x sc_cmd=%p cmd_type=%d flags=0x%lx refcount=%d\n",
		  io_req->xid, io_req->sc_cmd, io_req->cmd_type, io_req->flags,
		  refcount);

	/* Cleanup cmds re-use the same TID as the original I/O */
	xid = io_req->xid;
	io_req->cmd_type = QEDF_CLEANUP;
	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;

	task = qedf_get_task_mem(&qedf->tasks, xid);

	init_completion(&io_req->cleanup_done);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	io_req->task_params->sqe = sqe;

	init_initiator_cleanup_fcoe_task(io_req->task_params);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	tmo = wait_for_completion_timeout(&io_req->cleanup_done,
	    QEDF_CLEANUP_TIMEOUT * HZ);

	if (!tmo) {
		rc = FAILED;
		/* Timeout case */
		QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, "
			  "xid=%x.\n", io_req->xid);
		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
		/* Issue a drain request if cleanup task times out */
		QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n");
		qedf_drain_request(qedf);
	}

	/* If it's a task management command, handle it here; the reference
	 * will be dropped in qedf_execute_tmf. Note cmd_type was overwritten
	 * with QEDF_CLEANUP above, so test tm_flags rather than cmd_type.
	 */
	if (io_req->tm_flags == FCP_TMF_LUN_RESET ||
	    io_req->tm_flags == FCP_TMF_TGT_RESET) {
		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
		io_req->sc_cmd = NULL;
		complete(&io_req->tm_done);
	}

	if (io_req->sc_cmd) {
		if (io_req->return_scsi_cmd_on_abts)
			qedf_scsi_done(qedf, io_req, DID_ERROR);
	}

	if (rc == SUCCESS)
		io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;
	else
		io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;

	return rc;
}
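
/*
 * Dispatch sketch (assumption -- the CQE fast path lives outside this file,
 * and the completion-type label shown here is assumed): a cleanup CQE is
 * expected to be routed to the handler below, which unblocks the
 * wait_for_completion_timeout() above:
 *
 *	case FCOE_EXCH_CLEANUP_CQE_TYPE:
 *		qedf_process_cleanup_compl(qedf, cqe, io_req);
 *		break;
 */
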
void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n",
		  io_req->xid);

	clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);

	/* Complete so we can finish cleaning up the I/O */
	complete(&io_req->cleanup_done);
}

static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
	uint8_t tm_flags)
{
	struct qedf_ioreq *io_req;
	struct e4_fcoe_task_context *task;
	struct qedf_ctx *qedf = fcport->qedf;
	struct fc_lport *lport = qedf->lport;
	int rc = 0;
	uint16_t xid;
	int tmo = 0;
	int lun = 0;
	unsigned long flags;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	if (!sc_cmd) {
		QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n");
		return FAILED;
	}

	lun = (int)sc_cmd->device->lun;
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
		return FAILED;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "portid = 0x%x "
		   "tm_flags = %d\n", fcport->rdata->ids.port_id, tm_flags);

	io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
	if (!io_req) {
		QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF\n");
		rc = FAILED;
		goto reset_tmf_err;
	}

	if (tm_flags == FCP_TMF_LUN_RESET)
		qedf->lun_resets++;
	else if (tm_flags == FCP_TMF_TGT_RESET)
		qedf->target_resets++;

	/* Initialize rest of io_req fields */
	io_req->sc_cmd = sc_cmd;
	io_req->fcport = fcport;
	io_req->cmd_type = QEDF_TASK_MGMT_CMD;

	/* Record which cpu this request is associated with */
	io_req->cpu = smp_processor_id();

	/* Set TM flags */
	io_req->io_req_flags = QEDF_READ;
	io_req->data_xfer_len = 0;
	io_req->tm_flags = tm_flags;

	/* Default is to return a SCSI command when an error occurs */
	io_req->return_scsi_cmd_on_abts = false;

	/* Obtain exchange id */
	xid = io_req->xid;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = "
		   "0x%x\n", xid);

	/* Initialize task context for this IO request */
	task = qedf_get_task_mem(&qedf->tasks, xid);

	init_completion(&io_req->tm_done);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	qedf_init_task(fcport, lport, io_req, task, sqe);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
	tmo = wait_for_completion_timeout(&io_req->tm_done,
	    QEDF_TM_TIMEOUT * HZ);

	if (!tmo) {
		rc = FAILED;
		QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
		/* Clear outstanding bit since command timed out */
		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
		io_req->sc_cmd = NULL;
	} else {
		/* Check TMF response code */
		if (io_req->fcp_rsp_code == 0)
			rc = SUCCESS;
		else
			rc = FAILED;
	}
	/*
	 * Double check that fcport has not gone into an uploading state before
	 * executing the command flush for the LUN/target.
	 */
	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "fcport is uploading, not executing flush.\n");
		goto no_flush;
	}

	/* We do not need this io_req any more */
	kref_put(&io_req->refcount, qedf_release_cmd);

	if (tm_flags == FCP_TMF_LUN_RESET)
		qedf_flush_active_ios(fcport, lun);
	else
		qedf_flush_active_ios(fcport, -1);

no_flush:
	if (rc != SUCCESS) {
		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
		rc = FAILED;
	} else {
		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
		rc = SUCCESS;
	}
reset_tmf_err:
	return rc;
}
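
/*
 * Note on the response-code check above (FCP spec values, for orientation):
 * qedf_parse_fcp_rsp() fills io_req->fcp_rsp_code from the FCP_RSP_INFO
 * field; code 0x0 (TMF complete) maps to SUCCESS, while anything else,
 * e.g. 0x4 (TMF rejected) or 0x5 (TMF failed), maps to FAILED.
 */
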
int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
	struct qedf_ctx *qedf;
	struct fc_lport *lport;
	int rc = SUCCESS;
	int rval;
	struct qedf_ioreq *io_req = NULL;
	int ref_cnt = 0;
	struct fc_rport_priv *rdata = fcport->rdata;

	QEDF_ERR(NULL,
		 "tm_flags 0x%x sc_cmd %p op = 0x%02x target_id = 0x%x lun=%d\n",
		 tm_flags, sc_cmd, sc_cmd->cmnd[0], rport->scsi_target_id,
		 (int)sc_cmd->device->lun);

	if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
		QEDF_ERR(NULL, "stale rport\n");
		return FAILED;
	}

	QEDF_ERR(NULL, "portid=%06x tm_flags=%s\n", rdata->ids.port_id,
		 (tm_flags == FCP_TMF_TGT_RESET) ? "TARGET RESET" :
		 "LUN RESET");

	if (sc_cmd->SCp.ptr) {
		io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
		ref_cnt = kref_read(&io_req->refcount);
		QEDF_ERR(NULL,
			 "orig io_req = %p xid = 0x%x ref_cnt = %d.\n",
			 io_req, io_req->xid, ref_cnt);
	}

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		QEDF_ERR(NULL, "device_reset rport not ready\n");
		rc = FAILED;
		goto tmf_err;
	}

	rc = fc_block_scsi_eh(sc_cmd);
	if (rc)
		goto tmf_err;

	if (!fcport) {
		QEDF_ERR(NULL, "device_reset: rport is NULL\n");
		rc = FAILED;
		goto tmf_err;
	}

	qedf = fcport->qedf;

	if (!qedf) {
		QEDF_ERR(NULL, "qedf is NULL.\n");
		rc = FAILED;
		goto tmf_err;
	}

	lport = qedf->lport;

	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
		rc = SUCCESS;
		goto tmf_err;
	}

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
		rc = FAILED;
		goto tmf_err;
	}

	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n");
		rc = FAILED;
		goto tmf_err;
	}

	rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);

tmf_err:
	/* Drop the rdata reference taken above on all exit paths */
	kref_put(&rdata->kref, fc_rport_destroy);
	return rc;
}

void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	struct fcoe_cqe_rsp_info *fcp_rsp;

	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);

	fcp_rsp = &cqe->cqe_info.rsp_info;
	qedf_parse_fcp_rsp(io_req, fcp_rsp);

	io_req->sc_cmd = NULL;
	complete(&io_req->tm_done);
}
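
/*
 * Wiring sketch (assumption -- the actual hookup lives in the SCSI host
 * template elsewhere in the driver): the error-handler entry points are
 * expected to funnel into qedf_initiate_tmf() with the matching FCP TMF
 * flag; the handler names below are illustrative:
 *
 *	static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd)
 *	{
 *		return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
 *	}
 *
 *	static int qedf_eh_target_reset(struct scsi_cmnd *sc_cmd)
 *	{
 *		return qedf_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
 *	}
 */
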
void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
	struct fcoe_cqe *cqe)
{
	unsigned long flags;
	uint16_t tmp;
	uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
	u32 payload_len, crc;
	struct fc_frame_header *fh;
	struct fc_frame *fp;
	struct qedf_io_work *io_work;
	u32 bdq_idx;
	void *bdq_addr;
	struct scsi_bd *p_bd_info;

	p_bd_info = &cqe->cqe_info.unsolic_info.bd_info;
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
		  "address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n",
		  le32_to_cpu(p_bd_info->address.hi),
		  le32_to_cpu(p_bd_info->address.lo),
		  le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi),
		  le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo),
		  qedf->bdq_prod_idx, pktlen);

	bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo);
	if (bdq_idx >= QEDF_BDQ_SIZE) {
		QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
		    bdq_idx);
		goto increment_prod;
	}

	bdq_addr = qedf->bdq[bdq_idx].buf_addr;
	if (!bdq_addr) {
		QEDF_ERR(&(qedf->dbg_ctx), "bdq_addr is NULL, dropping "
		    "unsolicited packet.\n");
		goto increment_prod;
	}

	if (qedf_dump_frames) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
		    "BDQ frame is at addr=%p.\n", bdq_addr);
		print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1,
		    (void *)bdq_addr, pktlen, false);
	}

	/* Allocate frame */
	payload_len = pktlen - sizeof(struct fc_frame_header);
	fp = fc_frame_alloc(qedf->lport, payload_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n");
		goto increment_prod;
	}

	/* Copy data from BDQ buffer into fc_frame struct */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, (void *)bdq_addr, pktlen);

	/* Initialize the frame so libfc sees it as a valid frame */
	crc = fcoe_fc_crc(fp);
	fc_frame_init(fp);
	fr_dev(fp) = qedf->lport;
	fr_sof(fp) = FC_SOF_I3;
	fr_eof(fp) = FC_EOF_T;
	fr_crc(fp) = cpu_to_le32(~crc);

	/*
	 * We need to return the frame back up to libfc in a non-atomic
	 * context
	 */
	io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
	if (!io_work) {
		QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
			   "work for I/O completion.\n");
		fc_frame_free(fp);
		goto increment_prod;
	}
	memset(io_work, 0, sizeof(struct qedf_io_work));

	INIT_WORK(&io_work->work, qedf_fp_io_handler);

	/* Copy contents of CQE for deferred processing */
	memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));

	io_work->qedf = qedf;
	io_work->fp = fp;

	queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work);
increment_prod:
	spin_lock_irqsave(&qedf->hba_lock, flags);

	/* Increment producer to let f/w know we've handled the frame */
	qedf->bdq_prod_idx++;

	/* Producer index wraps at uint16_t boundary */
	if (qedf->bdq_prod_idx == 0xffff)
		qedf->bdq_prod_idx = 0;

	/* Read back after each write, presumably to flush the posted write */
	writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
	tmp = readw(qedf->bdq_primary_prod);
	writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
	tmp = readw(qedf->bdq_secondary_prod);

	spin_unlock_irqrestore(&qedf->hba_lock, flags);
}
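
/*
 * Worked example (illustrative): the BDQ producer index wraps explicitly at
 * 0xffff rather than relying on 16-bit overflow, so the value 0xffff itself
 * is never written to hardware; a producer sitting at 0xfffe advances as:
 *
 *	0xfffe -> bdq_prod_idx++ -> 0xffff -> reset to 0
 *
 * Both the primary and secondary producer registers receive the same value
 * under hba_lock so the firmware always sees a consistent pair.
 */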