1 /* 2 * QLogic FCoE Offload Driver 3 * Copyright (c) 2016-2018 Cavium Inc. 4 * 5 * This software is available under the terms of the GNU General Public License 6 * (GPL) Version 2, available from the file COPYING in the main directory of 7 * this source tree. 8 */ 9 #include <linux/spinlock.h> 10 #include <linux/vmalloc.h> 11 #include "qedf.h" 12 #include <scsi/scsi_tcq.h> 13 14 void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req, 15 unsigned int timer_msec) 16 { 17 queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work, 18 msecs_to_jiffies(timer_msec)); 19 } 20 21 static void qedf_cmd_timeout(struct work_struct *work) 22 { 23 24 struct qedf_ioreq *io_req = 25 container_of(work, struct qedf_ioreq, timeout_work.work); 26 struct qedf_ctx *qedf; 27 struct qedf_rport *fcport; 28 u8 op = 0; 29 30 if (io_req == NULL) { 31 QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n"); 32 return; 33 } 34 35 fcport = io_req->fcport; 36 if (io_req->fcport == NULL) { 37 QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n"); 38 return; 39 } 40 41 qedf = fcport->qedf; 42 43 switch (io_req->cmd_type) { 44 case QEDF_ABTS: 45 if (qedf == NULL) { 46 QEDF_INFO(NULL, QEDF_LOG_IO, 47 "qedf is NULL for ABTS xid=0x%x.\n", 48 io_req->xid); 49 return; 50 } 51 52 QEDF_ERR((&qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n", 53 io_req->xid); 54 /* Cleanup timed out ABTS */ 55 qedf_initiate_cleanup(io_req, true); 56 complete(&io_req->abts_done); 57 58 /* 59 * Need to call kref_put for reference taken when initiate_abts 60 * was called since abts_compl won't be called now that we've 61 * cleaned up the task. 62 */ 63 kref_put(&io_req->refcount, qedf_release_cmd); 64 65 /* Clear in abort bit now that we're done with the command */ 66 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags); 67 68 /* 69 * Now that the original I/O and the ABTS are complete see 70 * if we need to reconnect to the target. 71 */ 72 qedf_restart_rport(fcport); 73 break; 74 case QEDF_ELS: 75 if (!qedf) { 76 QEDF_INFO(NULL, QEDF_LOG_IO, 77 "qedf is NULL for ELS xid=0x%x.\n", 78 io_req->xid); 79 return; 80 } 81 /* ELS request no longer outstanding since it timed out */ 82 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); 83 84 kref_get(&io_req->refcount); 85 /* 86 * Don't attempt to clean an ELS timeout as any subseqeunt 87 * ABTS or cleanup requests just hang. For now just free 88 * the resources of the original I/O and the RRQ 89 */ 90 QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n", 91 io_req->xid); 92 io_req->event = QEDF_IOREQ_EV_ELS_TMO; 93 /* Call callback function to complete command */ 94 if (io_req->cb_func && io_req->cb_arg) { 95 op = io_req->cb_arg->op; 96 io_req->cb_func(io_req->cb_arg); 97 io_req->cb_arg = NULL; 98 } 99 qedf_initiate_cleanup(io_req, true); 100 kref_put(&io_req->refcount, qedf_release_cmd); 101 break; 102 case QEDF_SEQ_CLEANUP: 103 QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, " 104 "xid=0x%x.\n", io_req->xid); 105 qedf_initiate_cleanup(io_req, true); 106 io_req->event = QEDF_IOREQ_EV_ELS_TMO; 107 qedf_process_seq_cleanup_compl(qedf, NULL, io_req); 108 break; 109 default: 110 break; 111 } 112 } 113 114 void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr) 115 { 116 struct io_bdt *bdt_info; 117 struct qedf_ctx *qedf = cmgr->qedf; 118 size_t bd_tbl_sz; 119 u16 min_xid = 0; 120 u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1); 121 int num_ios; 122 int i; 123 struct qedf_ioreq *io_req; 124 125 num_ios = max_xid - min_xid + 1; 126 127 /* Free fcoe_bdt_ctx structures */ 128 if (!cmgr->io_bdt_pool) 129 goto free_cmd_pool; 130 131 bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge); 132 for (i = 0; i < num_ios; i++) { 133 bdt_info = cmgr->io_bdt_pool[i]; 134 if (bdt_info->bd_tbl) { 135 dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz, 136 bdt_info->bd_tbl, bdt_info->bd_tbl_dma); 137 bdt_info->bd_tbl = NULL; 138 } 139 } 140 141 /* Destroy io_bdt pool */ 142 for (i = 0; i < num_ios; i++) { 143 kfree(cmgr->io_bdt_pool[i]); 144 cmgr->io_bdt_pool[i] = NULL; 145 } 146 147 kfree(cmgr->io_bdt_pool); 148 cmgr->io_bdt_pool = NULL; 149 150 free_cmd_pool: 151 152 for (i = 0; i < num_ios; i++) { 153 io_req = &cmgr->cmds[i]; 154 kfree(io_req->sgl_task_params); 155 kfree(io_req->task_params); 156 /* Make sure we free per command sense buffer */ 157 if (io_req->sense_buffer) 158 dma_free_coherent(&qedf->pdev->dev, 159 QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer, 160 io_req->sense_buffer_dma); 161 cancel_delayed_work_sync(&io_req->rrq_work); 162 } 163 164 /* Free command manager itself */ 165 vfree(cmgr); 166 } 167 168 static void qedf_handle_rrq(struct work_struct *work) 169 { 170 struct qedf_ioreq *io_req = 171 container_of(work, struct qedf_ioreq, rrq_work.work); 172 173 atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_ACTIVE); 174 qedf_send_rrq(io_req); 175 176 } 177 178 struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf) 179 { 180 struct qedf_cmd_mgr *cmgr; 181 struct io_bdt *bdt_info; 182 struct qedf_ioreq *io_req; 183 u16 xid; 184 int i; 185 int num_ios; 186 u16 min_xid = 0; 187 u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1); 188 189 /* Make sure num_queues is already set before calling this function */ 190 if (!qedf->num_queues) { 191 QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n"); 192 return NULL; 193 } 194 195 if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) { 196 QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and " 197 "max_xid 0x%x.\n", min_xid, max_xid); 198 return NULL; 199 } 200 201 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid " 202 "0x%x.\n", min_xid, max_xid); 203 204 num_ios = max_xid - min_xid + 1; 205 206 cmgr = vzalloc(sizeof(struct qedf_cmd_mgr)); 207 if (!cmgr) { 208 QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n"); 209 return NULL; 210 } 211 212 cmgr->qedf = qedf; 213 spin_lock_init(&cmgr->lock); 214 215 /* 216 * Initialize I/O request fields. 217 */ 218 xid = 0; 219 220 for (i = 0; i < num_ios; i++) { 221 io_req = &cmgr->cmds[i]; 222 INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout); 223 224 io_req->xid = xid++; 225 226 INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq); 227 228 /* Allocate DMA memory to hold sense buffer */ 229 io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev, 230 QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma, 231 GFP_KERNEL); 232 if (!io_req->sense_buffer) 233 goto mem_err; 234 235 /* Allocate task parameters to pass to f/w init funcions */ 236 io_req->task_params = kzalloc(sizeof(*io_req->task_params), 237 GFP_KERNEL); 238 if (!io_req->task_params) { 239 QEDF_ERR(&(qedf->dbg_ctx), 240 "Failed to allocate task_params for xid=0x%x\n", 241 i); 242 goto mem_err; 243 } 244 245 /* 246 * Allocate scatter/gather list info to pass to f/w init 247 * functions. 248 */ 249 io_req->sgl_task_params = kzalloc( 250 sizeof(struct scsi_sgl_task_params), GFP_KERNEL); 251 if (!io_req->sgl_task_params) { 252 QEDF_ERR(&(qedf->dbg_ctx), 253 "Failed to allocate sgl_task_params for xid=0x%x\n", 254 i); 255 goto mem_err; 256 } 257 } 258 259 /* Allocate pool of io_bdts - one for each qedf_ioreq */ 260 cmgr->io_bdt_pool = kmalloc_array(num_ios, sizeof(struct io_bdt *), 261 GFP_KERNEL); 262 263 if (!cmgr->io_bdt_pool) { 264 QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n"); 265 goto mem_err; 266 } 267 268 for (i = 0; i < num_ios; i++) { 269 cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt), 270 GFP_KERNEL); 271 if (!cmgr->io_bdt_pool[i]) { 272 QEDF_WARN(&(qedf->dbg_ctx), 273 "Failed to alloc io_bdt_pool[%d].\n", i); 274 goto mem_err; 275 } 276 } 277 278 for (i = 0; i < num_ios; i++) { 279 bdt_info = cmgr->io_bdt_pool[i]; 280 bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev, 281 QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge), 282 &bdt_info->bd_tbl_dma, GFP_KERNEL); 283 if (!bdt_info->bd_tbl) { 284 QEDF_WARN(&(qedf->dbg_ctx), 285 "Failed to alloc bdt_tbl[%d].\n", i); 286 goto mem_err; 287 } 288 } 289 atomic_set(&cmgr->free_list_cnt, num_ios); 290 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, 291 "cmgr->free_list_cnt=%d.\n", 292 atomic_read(&cmgr->free_list_cnt)); 293 294 return cmgr; 295 296 mem_err: 297 qedf_cmd_mgr_free(cmgr); 298 return NULL; 299 } 300 301 struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type) 302 { 303 struct qedf_ctx *qedf = fcport->qedf; 304 struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr; 305 struct qedf_ioreq *io_req = NULL; 306 struct io_bdt *bd_tbl; 307 u16 xid; 308 uint32_t free_sqes; 309 int i; 310 unsigned long flags; 311 312 free_sqes = atomic_read(&fcport->free_sqes); 313 314 if (!free_sqes) { 315 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, 316 "Returning NULL, free_sqes=%d.\n ", 317 free_sqes); 318 goto out_failed; 319 } 320 321 /* Limit the number of outstanding R/W tasks */ 322 if ((atomic_read(&fcport->num_active_ios) >= 323 NUM_RW_TASKS_PER_CONNECTION)) { 324 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, 325 "Returning NULL, num_active_ios=%d.\n", 326 atomic_read(&fcport->num_active_ios)); 327 goto out_failed; 328 } 329 330 /* Limit global TIDs certain tasks */ 331 if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) { 332 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, 333 "Returning NULL, free_list_cnt=%d.\n", 334 atomic_read(&cmd_mgr->free_list_cnt)); 335 goto out_failed; 336 } 337 338 spin_lock_irqsave(&cmd_mgr->lock, flags); 339 for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) { 340 io_req = &cmd_mgr->cmds[cmd_mgr->idx]; 341 cmd_mgr->idx++; 342 if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS) 343 cmd_mgr->idx = 0; 344 345 /* Check to make sure command was previously freed */ 346 if (!io_req->alloc) 347 break; 348 } 349 350 if (i == FCOE_PARAMS_NUM_TASKS) { 351 spin_unlock_irqrestore(&cmd_mgr->lock, flags); 352 goto out_failed; 353 } 354 355 if (test_bit(QEDF_CMD_DIRTY, &io_req->flags)) 356 QEDF_ERR(&qedf->dbg_ctx, 357 "io_req found to be dirty ox_id = 0x%x.\n", 358 io_req->xid); 359 360 /* Clear any flags now that we've reallocated the xid */ 361 io_req->flags = 0; 362 io_req->alloc = 1; 363 spin_unlock_irqrestore(&cmd_mgr->lock, flags); 364 365 atomic_inc(&fcport->num_active_ios); 366 atomic_dec(&fcport->free_sqes); 367 xid = io_req->xid; 368 atomic_dec(&cmd_mgr->free_list_cnt); 369 370 io_req->cmd_mgr = cmd_mgr; 371 io_req->fcport = fcport; 372 373 /* Clear any stale sc_cmd back pointer */ 374 io_req->sc_cmd = NULL; 375 io_req->lun = -1; 376 377 /* Hold the io_req against deletion */ 378 kref_init(&io_req->refcount); /* ID: 001 */ 379 atomic_set(&io_req->state, QEDFC_CMD_ST_IO_ACTIVE); 380 381 /* Bind io_bdt for this io_req */ 382 /* Have a static link between io_req and io_bdt_pool */ 383 bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid]; 384 if (bd_tbl == NULL) { 385 QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid); 386 kref_put(&io_req->refcount, qedf_release_cmd); 387 goto out_failed; 388 } 389 bd_tbl->io_req = io_req; 390 io_req->cmd_type = cmd_type; 391 io_req->tm_flags = 0; 392 393 /* Reset sequence offset data */ 394 io_req->rx_buf_off = 0; 395 io_req->tx_buf_off = 0; 396 io_req->rx_id = 0xffff; /* No OX_ID */ 397 398 return io_req; 399 400 out_failed: 401 /* Record failure for stats and return NULL to caller */ 402 qedf->alloc_failures++; 403 return NULL; 404 } 405 406 static void qedf_free_mp_resc(struct qedf_ioreq *io_req) 407 { 408 struct qedf_mp_req *mp_req = &(io_req->mp_req); 409 struct qedf_ctx *qedf = io_req->fcport->qedf; 410 uint64_t sz = sizeof(struct scsi_sge); 411 412 /* clear tm flags */ 413 if (mp_req->mp_req_bd) { 414 dma_free_coherent(&qedf->pdev->dev, sz, 415 mp_req->mp_req_bd, mp_req->mp_req_bd_dma); 416 mp_req->mp_req_bd = NULL; 417 } 418 if (mp_req->mp_resp_bd) { 419 dma_free_coherent(&qedf->pdev->dev, sz, 420 mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma); 421 mp_req->mp_resp_bd = NULL; 422 } 423 if (mp_req->req_buf) { 424 dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE, 425 mp_req->req_buf, mp_req->req_buf_dma); 426 mp_req->req_buf = NULL; 427 } 428 if (mp_req->resp_buf) { 429 dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE, 430 mp_req->resp_buf, mp_req->resp_buf_dma); 431 mp_req->resp_buf = NULL; 432 } 433 } 434 435 void qedf_release_cmd(struct kref *ref) 436 { 437 struct qedf_ioreq *io_req = 438 container_of(ref, struct qedf_ioreq, refcount); 439 struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr; 440 struct qedf_rport *fcport = io_req->fcport; 441 unsigned long flags; 442 443 if (io_req->cmd_type == QEDF_SCSI_CMD) 444 WARN_ON(io_req->sc_cmd); 445 446 if (io_req->cmd_type == QEDF_ELS || 447 io_req->cmd_type == QEDF_TASK_MGMT_CMD) 448 qedf_free_mp_resc(io_req); 449 450 atomic_inc(&cmd_mgr->free_list_cnt); 451 atomic_dec(&fcport->num_active_ios); 452 atomic_set(&io_req->state, QEDF_CMD_ST_INACTIVE); 453 if (atomic_read(&fcport->num_active_ios) < 0) 454 QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n"); 455 456 /* Increment task retry identifier now that the request is released */ 457 io_req->task_retry_identifier++; 458 io_req->fcport = NULL; 459 460 clear_bit(QEDF_CMD_DIRTY, &io_req->flags); 461 io_req->cpu = 0; 462 spin_lock_irqsave(&cmd_mgr->lock, flags); 463 io_req->fcport = NULL; 464 io_req->alloc = 0; 465 spin_unlock_irqrestore(&cmd_mgr->lock, flags); 466 } 467 468 static int qedf_map_sg(struct qedf_ioreq *io_req) 469 { 470 struct scsi_cmnd *sc = io_req->sc_cmd; 471 struct Scsi_Host *host = sc->device->host; 472 struct fc_lport *lport = shost_priv(host); 473 struct qedf_ctx *qedf = lport_priv(lport); 474 struct scsi_sge *bd = io_req->bd_tbl->bd_tbl; 475 struct scatterlist *sg; 476 int byte_count = 0; 477 int sg_count = 0; 478 int bd_count = 0; 479 u32 sg_len; 480 u64 addr, end_addr; 481 int i = 0; 482 483 sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc), 484 scsi_sg_count(sc), sc->sc_data_direction); 485 sg = scsi_sglist(sc); 486 487 io_req->sge_type = QEDF_IOREQ_UNKNOWN_SGE; 488 489 if (sg_count <= 8 || io_req->io_req_flags == QEDF_READ) 490 io_req->sge_type = QEDF_IOREQ_FAST_SGE; 491 492 scsi_for_each_sg(sc, sg, sg_count, i) { 493 sg_len = (u32)sg_dma_len(sg); 494 addr = (u64)sg_dma_address(sg); 495 end_addr = (u64)(addr + sg_len); 496 497 /* 498 * Intermediate s/g element so check if start and end address 499 * is page aligned. Only required for writes and only if the 500 * number of scatter/gather elements is 8 or more. 501 */ 502 if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE && (i) && 503 (i != (sg_count - 1)) && sg_len < QEDF_PAGE_SIZE) 504 io_req->sge_type = QEDF_IOREQ_SLOW_SGE; 505 506 bd[bd_count].sge_addr.lo = cpu_to_le32(U64_LO(addr)); 507 bd[bd_count].sge_addr.hi = cpu_to_le32(U64_HI(addr)); 508 bd[bd_count].sge_len = cpu_to_le32(sg_len); 509 510 bd_count++; 511 byte_count += sg_len; 512 } 513 514 /* To catch a case where FAST and SLOW nothing is set, set FAST */ 515 if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE) 516 io_req->sge_type = QEDF_IOREQ_FAST_SGE; 517 518 if (byte_count != scsi_bufflen(sc)) 519 QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != " 520 "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count, 521 scsi_bufflen(sc), io_req->xid); 522 523 return bd_count; 524 } 525 526 static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req) 527 { 528 struct scsi_cmnd *sc = io_req->sc_cmd; 529 struct scsi_sge *bd = io_req->bd_tbl->bd_tbl; 530 int bd_count; 531 532 if (scsi_sg_count(sc)) { 533 bd_count = qedf_map_sg(io_req); 534 if (bd_count == 0) 535 return -ENOMEM; 536 } else { 537 bd_count = 0; 538 bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0; 539 bd[0].sge_len = 0; 540 } 541 io_req->bd_tbl->bd_valid = bd_count; 542 543 return 0; 544 } 545 546 static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req, 547 struct fcp_cmnd *fcp_cmnd) 548 { 549 struct scsi_cmnd *sc_cmd = io_req->sc_cmd; 550 551 /* fcp_cmnd is 32 bytes */ 552 memset(fcp_cmnd, 0, FCP_CMND_LEN); 553 554 /* 8 bytes: SCSI LUN info */ 555 int_to_scsilun(sc_cmd->device->lun, 556 (struct scsi_lun *)&fcp_cmnd->fc_lun); 557 558 /* 4 bytes: flag info */ 559 fcp_cmnd->fc_pri_ta = 0; 560 fcp_cmnd->fc_tm_flags = io_req->tm_flags; 561 fcp_cmnd->fc_flags = io_req->io_req_flags; 562 fcp_cmnd->fc_cmdref = 0; 563 564 /* Populate data direction */ 565 if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) { 566 fcp_cmnd->fc_flags |= FCP_CFL_RDDATA; 567 } else { 568 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) 569 fcp_cmnd->fc_flags |= FCP_CFL_WRDATA; 570 else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) 571 fcp_cmnd->fc_flags |= FCP_CFL_RDDATA; 572 } 573 574 fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE; 575 576 /* 16 bytes: CDB information */ 577 if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) 578 memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len); 579 580 /* 4 bytes: FCP data length */ 581 fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len); 582 } 583 584 static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport, 585 struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx, 586 struct fcoe_wqe *sqe) 587 { 588 enum fcoe_task_type task_type; 589 struct scsi_cmnd *sc_cmd = io_req->sc_cmd; 590 struct io_bdt *bd_tbl = io_req->bd_tbl; 591 u8 fcp_cmnd[32]; 592 u32 tmp_fcp_cmnd[8]; 593 int bd_count = 0; 594 struct qedf_ctx *qedf = fcport->qedf; 595 uint16_t cq_idx = smp_processor_id() % qedf->num_queues; 596 struct regpair sense_data_buffer_phys_addr; 597 u32 tx_io_size = 0; 598 u32 rx_io_size = 0; 599 int i, cnt; 600 601 /* Note init_initiator_rw_fcoe_task memsets the task context */ 602 io_req->task = task_ctx; 603 memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context)); 604 memset(io_req->task_params, 0, sizeof(struct fcoe_task_params)); 605 memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params)); 606 607 /* Set task type bassed on DMA directio of command */ 608 if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) { 609 task_type = FCOE_TASK_TYPE_READ_INITIATOR; 610 } else { 611 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { 612 task_type = FCOE_TASK_TYPE_WRITE_INITIATOR; 613 tx_io_size = io_req->data_xfer_len; 614 } else { 615 task_type = FCOE_TASK_TYPE_READ_INITIATOR; 616 rx_io_size = io_req->data_xfer_len; 617 } 618 } 619 620 /* Setup the fields for fcoe_task_params */ 621 io_req->task_params->context = task_ctx; 622 io_req->task_params->sqe = sqe; 623 io_req->task_params->task_type = task_type; 624 io_req->task_params->tx_io_size = tx_io_size; 625 io_req->task_params->rx_io_size = rx_io_size; 626 io_req->task_params->conn_cid = fcport->fw_cid; 627 io_req->task_params->itid = io_req->xid; 628 io_req->task_params->cq_rss_number = cq_idx; 629 io_req->task_params->is_tape_device = fcport->dev_type; 630 631 /* Fill in information for scatter/gather list */ 632 if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) { 633 bd_count = bd_tbl->bd_valid; 634 io_req->sgl_task_params->sgl = bd_tbl->bd_tbl; 635 io_req->sgl_task_params->sgl_phys_addr.lo = 636 U64_LO(bd_tbl->bd_tbl_dma); 637 io_req->sgl_task_params->sgl_phys_addr.hi = 638 U64_HI(bd_tbl->bd_tbl_dma); 639 io_req->sgl_task_params->num_sges = bd_count; 640 io_req->sgl_task_params->total_buffer_size = 641 scsi_bufflen(io_req->sc_cmd); 642 if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE) 643 io_req->sgl_task_params->small_mid_sge = 1; 644 else 645 io_req->sgl_task_params->small_mid_sge = 0; 646 } 647 648 /* Fill in physical address of sense buffer */ 649 sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma); 650 sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma); 651 652 /* fill FCP_CMND IU */ 653 qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd); 654 655 /* Swap fcp_cmnd since FC is big endian */ 656 cnt = sizeof(struct fcp_cmnd) / sizeof(u32); 657 for (i = 0; i < cnt; i++) { 658 tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]); 659 } 660 memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd)); 661 662 init_initiator_rw_fcoe_task(io_req->task_params, 663 io_req->sgl_task_params, 664 sense_data_buffer_phys_addr, 665 io_req->task_retry_identifier, fcp_cmnd); 666 667 /* Increment SGL type counters */ 668 if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE) 669 qedf->slow_sge_ios++; 670 else 671 qedf->fast_sge_ios++; 672 } 673 674 void qedf_init_mp_task(struct qedf_ioreq *io_req, 675 struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe) 676 { 677 struct qedf_mp_req *mp_req = &(io_req->mp_req); 678 struct qedf_rport *fcport = io_req->fcport; 679 struct qedf_ctx *qedf = io_req->fcport->qedf; 680 struct fc_frame_header *fc_hdr; 681 struct fcoe_tx_mid_path_params task_fc_hdr; 682 struct scsi_sgl_task_params tx_sgl_task_params; 683 struct scsi_sgl_task_params rx_sgl_task_params; 684 685 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, 686 "Initializing MP task for cmd_type=%d\n", 687 io_req->cmd_type); 688 689 qedf->control_requests++; 690 691 memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params)); 692 memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params)); 693 memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context)); 694 memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params)); 695 696 /* Setup the task from io_req for easy reference */ 697 io_req->task = task_ctx; 698 699 /* Setup the fields for fcoe_task_params */ 700 io_req->task_params->context = task_ctx; 701 io_req->task_params->sqe = sqe; 702 io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH; 703 io_req->task_params->tx_io_size = io_req->data_xfer_len; 704 /* rx_io_size tells the f/w how large a response buffer we have */ 705 io_req->task_params->rx_io_size = PAGE_SIZE; 706 io_req->task_params->conn_cid = fcport->fw_cid; 707 io_req->task_params->itid = io_req->xid; 708 /* Return middle path commands on CQ 0 */ 709 io_req->task_params->cq_rss_number = 0; 710 io_req->task_params->is_tape_device = fcport->dev_type; 711 712 fc_hdr = &(mp_req->req_fc_hdr); 713 /* Set OX_ID and RX_ID based on driver task id */ 714 fc_hdr->fh_ox_id = io_req->xid; 715 fc_hdr->fh_rx_id = htons(0xffff); 716 717 /* Set up FC header information */ 718 task_fc_hdr.parameter = fc_hdr->fh_parm_offset; 719 task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl; 720 task_fc_hdr.type = fc_hdr->fh_type; 721 task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl; 722 task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl; 723 task_fc_hdr.rx_id = fc_hdr->fh_rx_id; 724 task_fc_hdr.ox_id = fc_hdr->fh_ox_id; 725 726 /* Set up s/g list parameters for request buffer */ 727 tx_sgl_task_params.sgl = mp_req->mp_req_bd; 728 tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma); 729 tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma); 730 tx_sgl_task_params.num_sges = 1; 731 /* Set PAGE_SIZE for now since sg element is that size ??? */ 732 tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len; 733 tx_sgl_task_params.small_mid_sge = 0; 734 735 /* Set up s/g list parameters for request buffer */ 736 rx_sgl_task_params.sgl = mp_req->mp_resp_bd; 737 rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma); 738 rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma); 739 rx_sgl_task_params.num_sges = 1; 740 /* Set PAGE_SIZE for now since sg element is that size ??? */ 741 rx_sgl_task_params.total_buffer_size = PAGE_SIZE; 742 rx_sgl_task_params.small_mid_sge = 0; 743 744 745 /* 746 * Last arg is 0 as previous code did not set that we wanted the 747 * fc header information. 748 */ 749 init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params, 750 &task_fc_hdr, 751 &tx_sgl_task_params, 752 &rx_sgl_task_params, 0); 753 } 754 755 /* Presumed that fcport->rport_lock is held */ 756 u16 qedf_get_sqe_idx(struct qedf_rport *fcport) 757 { 758 uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe)); 759 u16 rval; 760 761 rval = fcport->sq_prod_idx; 762 763 /* Adjust ring index */ 764 fcport->sq_prod_idx++; 765 fcport->fw_sq_prod_idx++; 766 if (fcport->sq_prod_idx == total_sqe) 767 fcport->sq_prod_idx = 0; 768 769 return rval; 770 } 771 772 void qedf_ring_doorbell(struct qedf_rport *fcport) 773 { 774 struct fcoe_db_data dbell = { 0 }; 775 776 dbell.agg_flags = 0; 777 778 dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT; 779 dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT; 780 dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD << 781 FCOE_DB_DATA_AGG_VAL_SEL_SHIFT; 782 783 dbell.sq_prod = fcport->fw_sq_prod_idx; 784 /* wmb makes sure that the BDs data is updated before updating the 785 * producer, otherwise FW may read old data from the BDs. 786 */ 787 wmb(); 788 barrier(); 789 writel(*(u32 *)&dbell, fcport->p_doorbell); 790 /* 791 * Fence required to flush the write combined buffer, since another 792 * CPU may write to the same doorbell address and data may be lost 793 * due to relaxed order nature of write combined bar. 794 */ 795 wmb(); 796 } 797 798 static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req, 799 int8_t direction) 800 { 801 struct qedf_ctx *qedf = fcport->qedf; 802 struct qedf_io_log *io_log; 803 struct scsi_cmnd *sc_cmd = io_req->sc_cmd; 804 unsigned long flags; 805 uint8_t op; 806 807 spin_lock_irqsave(&qedf->io_trace_lock, flags); 808 809 io_log = &qedf->io_trace_buf[qedf->io_trace_idx]; 810 io_log->direction = direction; 811 io_log->task_id = io_req->xid; 812 io_log->port_id = fcport->rdata->ids.port_id; 813 io_log->lun = sc_cmd->device->lun; 814 io_log->op = op = sc_cmd->cmnd[0]; 815 io_log->lba[0] = sc_cmd->cmnd[2]; 816 io_log->lba[1] = sc_cmd->cmnd[3]; 817 io_log->lba[2] = sc_cmd->cmnd[4]; 818 io_log->lba[3] = sc_cmd->cmnd[5]; 819 io_log->bufflen = scsi_bufflen(sc_cmd); 820 io_log->sg_count = scsi_sg_count(sc_cmd); 821 io_log->result = sc_cmd->result; 822 io_log->jiffies = jiffies; 823 io_log->refcount = kref_read(&io_req->refcount); 824 825 if (direction == QEDF_IO_TRACE_REQ) { 826 /* For requests we only care abot the submission CPU */ 827 io_log->req_cpu = io_req->cpu; 828 io_log->int_cpu = 0; 829 io_log->rsp_cpu = 0; 830 } else if (direction == QEDF_IO_TRACE_RSP) { 831 io_log->req_cpu = io_req->cpu; 832 io_log->int_cpu = io_req->int_cpu; 833 io_log->rsp_cpu = smp_processor_id(); 834 } 835 836 io_log->sge_type = io_req->sge_type; 837 838 qedf->io_trace_idx++; 839 if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE) 840 qedf->io_trace_idx = 0; 841 842 spin_unlock_irqrestore(&qedf->io_trace_lock, flags); 843 } 844 845 int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req) 846 { 847 struct scsi_cmnd *sc_cmd = io_req->sc_cmd; 848 struct Scsi_Host *host = sc_cmd->device->host; 849 struct fc_lport *lport = shost_priv(host); 850 struct qedf_ctx *qedf = lport_priv(lport); 851 struct e4_fcoe_task_context *task_ctx; 852 u16 xid; 853 enum fcoe_task_type req_type = 0; 854 struct fcoe_wqe *sqe; 855 u16 sqe_idx; 856 857 /* Initialize rest of io_req fileds */ 858 io_req->data_xfer_len = scsi_bufflen(sc_cmd); 859 sc_cmd->SCp.ptr = (char *)io_req; 860 io_req->sge_type = QEDF_IOREQ_FAST_SGE; /* Assume fast SGL by default */ 861 862 /* Record which cpu this request is associated with */ 863 io_req->cpu = smp_processor_id(); 864 865 if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { 866 req_type = FCOE_TASK_TYPE_READ_INITIATOR; 867 io_req->io_req_flags = QEDF_READ; 868 qedf->input_requests++; 869 } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { 870 req_type = FCOE_TASK_TYPE_WRITE_INITIATOR; 871 io_req->io_req_flags = QEDF_WRITE; 872 qedf->output_requests++; 873 } else { 874 io_req->io_req_flags = 0; 875 qedf->control_requests++; 876 } 877 878 xid = io_req->xid; 879 880 /* Build buffer descriptor list for firmware from sg list */ 881 if (qedf_build_bd_list_from_sg(io_req)) { 882 QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n"); 883 /* Release cmd will release io_req, but sc_cmd is assigned */ 884 io_req->sc_cmd = NULL; 885 kref_put(&io_req->refcount, qedf_release_cmd); 886 return -EAGAIN; 887 } 888 889 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) || 890 test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { 891 QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n"); 892 /* Release cmd will release io_req, but sc_cmd is assigned */ 893 io_req->sc_cmd = NULL; 894 kref_put(&io_req->refcount, qedf_release_cmd); 895 return -EINVAL; 896 } 897 898 /* Record LUN number for later use if we neeed them */ 899 io_req->lun = (int)sc_cmd->device->lun; 900 901 /* Obtain free SQE */ 902 sqe_idx = qedf_get_sqe_idx(fcport); 903 sqe = &fcport->sq[sqe_idx]; 904 memset(sqe, 0, sizeof(struct fcoe_wqe)); 905 906 /* Get the task context */ 907 task_ctx = qedf_get_task_mem(&qedf->tasks, xid); 908 if (!task_ctx) { 909 QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n", 910 xid); 911 /* Release cmd will release io_req, but sc_cmd is assigned */ 912 io_req->sc_cmd = NULL; 913 kref_put(&io_req->refcount, qedf_release_cmd); 914 return -EINVAL; 915 } 916 917 qedf_init_task(fcport, lport, io_req, task_ctx, sqe); 918 919 /* Ring doorbell */ 920 qedf_ring_doorbell(fcport); 921 922 /* Set that command is with the firmware now */ 923 set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); 924 925 if (qedf_io_tracing && io_req->sc_cmd) 926 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ); 927 928 return false; 929 } 930 931 int 932 qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd) 933 { 934 struct fc_lport *lport = shost_priv(host); 935 struct qedf_ctx *qedf = lport_priv(lport); 936 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); 937 struct fc_rport_libfc_priv *rp = rport->dd_data; 938 struct qedf_rport *fcport; 939 struct qedf_ioreq *io_req; 940 int rc = 0; 941 int rval; 942 unsigned long flags = 0; 943 int num_sgs = 0; 944 945 num_sgs = scsi_sg_count(sc_cmd); 946 if (scsi_sg_count(sc_cmd) > QEDF_MAX_BDS_PER_CMD) { 947 QEDF_ERR(&qedf->dbg_ctx, 948 "Number of SG elements %d exceeds what hardware limitation of %d.\n", 949 num_sgs, QEDF_MAX_BDS_PER_CMD); 950 sc_cmd->result = DID_ERROR; 951 sc_cmd->scsi_done(sc_cmd); 952 return 0; 953 } 954 955 if (test_bit(QEDF_UNLOADING, &qedf->flags) || 956 test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) { 957 sc_cmd->result = DID_NO_CONNECT << 16; 958 sc_cmd->scsi_done(sc_cmd); 959 return 0; 960 } 961 962 if (!qedf->pdev->msix_enabled) { 963 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, 964 "Completing sc_cmd=%p DID_NO_CONNECT as MSI-X is not enabled.\n", 965 sc_cmd); 966 sc_cmd->result = DID_NO_CONNECT << 16; 967 sc_cmd->scsi_done(sc_cmd); 968 return 0; 969 } 970 971 rval = fc_remote_port_chkready(rport); 972 if (rval) { 973 sc_cmd->result = rval; 974 sc_cmd->scsi_done(sc_cmd); 975 return 0; 976 } 977 978 /* Retry command if we are doing a qed drain operation */ 979 if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) { 980 rc = SCSI_MLQUEUE_HOST_BUSY; 981 goto exit_qcmd; 982 } 983 984 if (lport->state != LPORT_ST_READY || 985 atomic_read(&qedf->link_state) != QEDF_LINK_UP) { 986 rc = SCSI_MLQUEUE_HOST_BUSY; 987 goto exit_qcmd; 988 } 989 990 /* rport and tgt are allocated together, so tgt should be non-NULL */ 991 fcport = (struct qedf_rport *)&rp[1]; 992 993 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) || 994 test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { 995 /* 996 * Session is not offloaded yet. Let SCSI-ml retry 997 * the command. 998 */ 999 rc = SCSI_MLQUEUE_TARGET_BUSY; 1000 goto exit_qcmd; 1001 } 1002 1003 atomic_inc(&fcport->ios_to_queue); 1004 1005 if (fcport->retry_delay_timestamp) { 1006 if (time_after(jiffies, fcport->retry_delay_timestamp)) { 1007 fcport->retry_delay_timestamp = 0; 1008 } else { 1009 /* If retry_delay timer is active, flow off the ML */ 1010 rc = SCSI_MLQUEUE_TARGET_BUSY; 1011 atomic_dec(&fcport->ios_to_queue); 1012 goto exit_qcmd; 1013 } 1014 } 1015 1016 io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD); 1017 if (!io_req) { 1018 rc = SCSI_MLQUEUE_HOST_BUSY; 1019 atomic_dec(&fcport->ios_to_queue); 1020 goto exit_qcmd; 1021 } 1022 1023 io_req->sc_cmd = sc_cmd; 1024 1025 /* Take fcport->rport_lock for posting to fcport send queue */ 1026 spin_lock_irqsave(&fcport->rport_lock, flags); 1027 if (qedf_post_io_req(fcport, io_req)) { 1028 QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n"); 1029 /* Return SQE to pool */ 1030 atomic_inc(&fcport->free_sqes); 1031 rc = SCSI_MLQUEUE_HOST_BUSY; 1032 } 1033 spin_unlock_irqrestore(&fcport->rport_lock, flags); 1034 atomic_dec(&fcport->ios_to_queue); 1035 1036 exit_qcmd: 1037 return rc; 1038 } 1039 1040 static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req, 1041 struct fcoe_cqe_rsp_info *fcp_rsp) 1042 { 1043 struct scsi_cmnd *sc_cmd = io_req->sc_cmd; 1044 struct qedf_ctx *qedf = io_req->fcport->qedf; 1045 u8 rsp_flags = fcp_rsp->rsp_flags.flags; 1046 int fcp_sns_len = 0; 1047 int fcp_rsp_len = 0; 1048 uint8_t *rsp_info, *sense_data; 1049 1050 io_req->fcp_status = FC_GOOD; 1051 io_req->fcp_resid = 0; 1052 if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER | 1053 FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER)) 1054 io_req->fcp_resid = fcp_rsp->fcp_resid; 1055 1056 io_req->scsi_comp_flags = rsp_flags; 1057 CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status = 1058 fcp_rsp->scsi_status_code; 1059 1060 if (rsp_flags & 1061 FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) 1062 fcp_rsp_len = fcp_rsp->fcp_rsp_len; 1063 1064 if (rsp_flags & 1065 FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) 1066 fcp_sns_len = fcp_rsp->fcp_sns_len; 1067 1068 io_req->fcp_rsp_len = fcp_rsp_len; 1069 io_req->fcp_sns_len = fcp_sns_len; 1070 rsp_info = sense_data = io_req->sense_buffer; 1071 1072 /* fetch fcp_rsp_code */ 1073 if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) { 1074 /* Only for task management function */ 1075 io_req->fcp_rsp_code = rsp_info[3]; 1076 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, 1077 "fcp_rsp_code = %d\n", io_req->fcp_rsp_code); 1078 /* Adjust sense-data location. */ 1079 sense_data += fcp_rsp_len; 1080 } 1081 1082 if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) { 1083 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, 1084 "Truncating sense buffer\n"); 1085 fcp_sns_len = SCSI_SENSE_BUFFERSIZE; 1086 } 1087 1088 /* The sense buffer can be NULL for TMF commands */ 1089 if (sc_cmd->sense_buffer) { 1090 memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 1091 if (fcp_sns_len) 1092 memcpy(sc_cmd->sense_buffer, sense_data, 1093 fcp_sns_len); 1094 } 1095 } 1096 1097 static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req) 1098 { 1099 struct scsi_cmnd *sc = io_req->sc_cmd; 1100 1101 if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) { 1102 dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc), 1103 scsi_sg_count(sc), sc->sc_data_direction); 1104 io_req->bd_tbl->bd_valid = 0; 1105 } 1106 } 1107 1108 void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, 1109 struct qedf_ioreq *io_req) 1110 { 1111 u16 xid; 1112 struct e4_fcoe_task_context *task_ctx; 1113 struct scsi_cmnd *sc_cmd; 1114 struct fcoe_cqe_rsp_info *fcp_rsp; 1115 struct qedf_rport *fcport; 1116 int refcount; 1117 u16 scope, qualifier = 0; 1118 u8 fw_residual_flag = 0; 1119 1120 if (!io_req) 1121 return; 1122 if (!cqe) 1123 return; 1124 1125 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) || 1126 test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) || 1127 test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) { 1128 QEDF_ERR(&qedf->dbg_ctx, 1129 "io_req xid=0x%x already in cleanup or abort processing or already completed.\n", 1130 io_req->xid); 1131 return; 1132 } 1133 1134 xid = io_req->xid; 1135 task_ctx = qedf_get_task_mem(&qedf->tasks, xid); 1136 sc_cmd = io_req->sc_cmd; 1137 fcp_rsp = &cqe->cqe_info.rsp_info; 1138 1139 if (!sc_cmd) { 1140 QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n"); 1141 return; 1142 } 1143 1144 if (!sc_cmd->SCp.ptr) { 1145 QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in " 1146 "another context.\n"); 1147 return; 1148 } 1149 1150 if (!sc_cmd->device) { 1151 QEDF_ERR(&qedf->dbg_ctx, 1152 "Device for sc_cmd %p is NULL.\n", sc_cmd); 1153 return; 1154 } 1155 1156 if (!sc_cmd->request) { 1157 QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, " 1158 "sc_cmd=%p.\n", sc_cmd); 1159 return; 1160 } 1161 1162 if (!sc_cmd->request->q) { 1163 QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request " 1164 "is not valid, sc_cmd=%p.\n", sc_cmd); 1165 return; 1166 } 1167 1168 fcport = io_req->fcport; 1169 1170 /* 1171 * When flush is active, let the cmds be completed from the cleanup 1172 * context 1173 */ 1174 if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) || 1175 (test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags) && 1176 sc_cmd->device->lun == (u64)fcport->lun_reset_lun)) { 1177 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, 1178 "Dropping good completion xid=0x%x as fcport is flushing", 1179 io_req->xid); 1180 return; 1181 } 1182 1183 qedf_parse_fcp_rsp(io_req, fcp_rsp); 1184 1185 qedf_unmap_sg_list(qedf, io_req); 1186 1187 /* Check for FCP transport error */ 1188 if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) { 1189 QEDF_ERR(&(qedf->dbg_ctx), 1190 "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d " 1191 "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len, 1192 io_req->fcp_rsp_code); 1193 sc_cmd->result = DID_BUS_BUSY << 16; 1194 goto out; 1195 } 1196 1197 fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags, 1198 FCOE_CQE_RSP_INFO_FW_UNDERRUN); 1199 if (fw_residual_flag) { 1200 QEDF_ERR(&qedf->dbg_ctx, 1201 "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x fcp_resid=%d fw_residual=0x%x lba=%02x%02x%02x%02x.\n", 1202 io_req->xid, fcp_rsp->rsp_flags.flags, 1203 io_req->fcp_resid, 1204 cqe->cqe_info.rsp_info.fw_residual, sc_cmd->cmnd[2], 1205 sc_cmd->cmnd[3], sc_cmd->cmnd[4], sc_cmd->cmnd[5]); 1206 1207 if (io_req->cdb_status == 0) 1208 sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status; 1209 else 1210 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status; 1211 1212 /* 1213 * Set resid to the whole buffer length so we won't try to resue 1214 * any previously data. 1215 */ 1216 scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd)); 1217 goto out; 1218 } 1219 1220 switch (io_req->fcp_status) { 1221 case FC_GOOD: 1222 if (io_req->cdb_status == 0) { 1223 /* Good I/O completion */ 1224 sc_cmd->result = DID_OK << 16; 1225 } else { 1226 refcount = kref_read(&io_req->refcount); 1227 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, 1228 "%d:0:%d:%lld xid=0x%0x op=0x%02x " 1229 "lba=%02x%02x%02x%02x cdb_status=%d " 1230 "fcp_resid=0x%x refcount=%d.\n", 1231 qedf->lport->host->host_no, sc_cmd->device->id, 1232 sc_cmd->device->lun, io_req->xid, 1233 sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3], 1234 sc_cmd->cmnd[4], sc_cmd->cmnd[5], 1235 io_req->cdb_status, io_req->fcp_resid, 1236 refcount); 1237 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status; 1238 1239 if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL || 1240 io_req->cdb_status == SAM_STAT_BUSY) { 1241 /* 1242 * Check whether we need to set retry_delay at 1243 * all based on retry_delay module parameter 1244 * and the status qualifier. 1245 */ 1246 1247 /* Upper 2 bits */ 1248 scope = fcp_rsp->retry_delay_timer & 0xC000; 1249 /* Lower 14 bits */ 1250 qualifier = fcp_rsp->retry_delay_timer & 0x3FFF; 1251 1252 if (qedf_retry_delay && 1253 scope > 0 && qualifier > 0 && 1254 qualifier <= 0x3FEF) { 1255 /* Check we don't go over the max */ 1256 if (qualifier > QEDF_RETRY_DELAY_MAX) 1257 qualifier = 1258 QEDF_RETRY_DELAY_MAX; 1259 fcport->retry_delay_timestamp = 1260 jiffies + (qualifier * HZ / 10); 1261 } 1262 /* Record stats */ 1263 if (io_req->cdb_status == 1264 SAM_STAT_TASK_SET_FULL) 1265 qedf->task_set_fulls++; 1266 else 1267 qedf->busy++; 1268 } 1269 } 1270 if (io_req->fcp_resid) 1271 scsi_set_resid(sc_cmd, io_req->fcp_resid); 1272 break; 1273 default: 1274 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n", 1275 io_req->fcp_status); 1276 break; 1277 } 1278 1279 out: 1280 if (qedf_io_tracing) 1281 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP); 1282 1283 /* 1284 * We wait till the end of the function to clear the 1285 * outstanding bit in case we need to send an abort 1286 */ 1287 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); 1288 1289 io_req->sc_cmd = NULL; 1290 sc_cmd->SCp.ptr = NULL; 1291 sc_cmd->scsi_done(sc_cmd); 1292 kref_put(&io_req->refcount, qedf_release_cmd); 1293 } 1294 1295 /* Return a SCSI command in some other context besides a normal completion */ 1296 void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req, 1297 int result) 1298 { 1299 u16 xid; 1300 struct scsi_cmnd *sc_cmd; 1301 int refcount; 1302 1303 if (!io_req) 1304 return; 1305 1306 if (test_and_set_bit(QEDF_CMD_ERR_SCSI_DONE, &io_req->flags)) { 1307 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, 1308 "io_req:%p scsi_done handling already done\n", 1309 io_req); 1310 return; 1311 } 1312 1313 /* 1314 * We will be done with this command after this call so clear the 1315 * outstanding bit. 1316 */ 1317 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); 1318 1319 xid = io_req->xid; 1320 sc_cmd = io_req->sc_cmd; 1321 1322 if (!sc_cmd) { 1323 QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n"); 1324 return; 1325 } 1326 1327 if (!virt_addr_valid(sc_cmd)) { 1328 QEDF_ERR(&qedf->dbg_ctx, "sc_cmd=%p is not valid.", sc_cmd); 1329 goto bad_scsi_ptr; 1330 } 1331 1332 if (!sc_cmd->SCp.ptr) { 1333 QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in " 1334 "another context.\n"); 1335 return; 1336 } 1337 1338 if (!sc_cmd->device) { 1339 QEDF_ERR(&qedf->dbg_ctx, "Device for sc_cmd %p is NULL.\n", 1340 sc_cmd); 1341 goto bad_scsi_ptr; 1342 } 1343 1344 if (!virt_addr_valid(sc_cmd->device)) { 1345 QEDF_ERR(&qedf->dbg_ctx, 1346 "Device pointer for sc_cmd %p is bad.\n", sc_cmd); 1347 goto bad_scsi_ptr; 1348 } 1349 1350 if (!sc_cmd->sense_buffer) { 1351 QEDF_ERR(&qedf->dbg_ctx, 1352 "sc_cmd->sense_buffer for sc_cmd %p is NULL.\n", 1353 sc_cmd); 1354 goto bad_scsi_ptr; 1355 } 1356 1357 if (!virt_addr_valid(sc_cmd->sense_buffer)) { 1358 QEDF_ERR(&qedf->dbg_ctx, 1359 "sc_cmd->sense_buffer for sc_cmd %p is bad.\n", 1360 sc_cmd); 1361 goto bad_scsi_ptr; 1362 } 1363 1364 if (!sc_cmd->scsi_done) { 1365 QEDF_ERR(&qedf->dbg_ctx, 1366 "sc_cmd->scsi_done for sc_cmd %p is NULL.\n", 1367 sc_cmd); 1368 goto bad_scsi_ptr; 1369 } 1370 1371 qedf_unmap_sg_list(qedf, io_req); 1372 1373 sc_cmd->result = result << 16; 1374 refcount = kref_read(&io_req->refcount); 1375 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing " 1376 "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, " 1377 "allowed=%d retries=%d refcount=%d.\n", 1378 qedf->lport->host->host_no, sc_cmd->device->id, 1379 sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0], 1380 sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4], 1381 sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries, 1382 refcount); 1383 1384 /* 1385 * Set resid to the whole buffer length so we won't try to resue any 1386 * previously read data 1387 */ 1388 scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd)); 1389 1390 if (qedf_io_tracing) 1391 qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP); 1392 1393 io_req->sc_cmd = NULL; 1394 sc_cmd->SCp.ptr = NULL; 1395 sc_cmd->scsi_done(sc_cmd); 1396 kref_put(&io_req->refcount, qedf_release_cmd); 1397 return; 1398 1399 bad_scsi_ptr: 1400 /* 1401 * Clear the io_req->sc_cmd backpointer so we don't try to process 1402 * this again 1403 */ 1404 io_req->sc_cmd = NULL; 1405 kref_put(&io_req->refcount, qedf_release_cmd); /* ID: 001 */ 1406 } 1407 1408 /* 1409 * Handle warning type CQE completions. This is mainly used for REC timer 1410 * popping. 1411 */ 1412 void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, 1413 struct qedf_ioreq *io_req) 1414 { 1415 int rval, i; 1416 struct qedf_rport *fcport = io_req->fcport; 1417 u64 err_warn_bit_map; 1418 u8 err_warn = 0xff; 1419 1420 if (!cqe) 1421 return; 1422 1423 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, " 1424 "xid=0x%x\n", io_req->xid); 1425 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), 1426 "err_warn_bitmap=%08x:%08x\n", 1427 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi), 1428 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo)); 1429 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, " 1430 "rx_buff_off=%08x, rx_id=%04x\n", 1431 le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off), 1432 le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off), 1433 le32_to_cpu(cqe->cqe_info.err_info.rx_id)); 1434 1435 /* Normalize the error bitmap value to an just an unsigned int */ 1436 err_warn_bit_map = (u64) 1437 ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) | 1438 (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo; 1439 for (i = 0; i < 64; i++) { 1440 if (err_warn_bit_map & (u64)((u64)1 << i)) { 1441 err_warn = i; 1442 break; 1443 } 1444 } 1445 1446 /* Check if REC TOV expired if this is a tape device */ 1447 if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) { 1448 if (err_warn == 1449 FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) { 1450 QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n"); 1451 if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) { 1452 io_req->rx_buf_off = 1453 cqe->cqe_info.err_info.rx_buf_off; 1454 io_req->tx_buf_off = 1455 cqe->cqe_info.err_info.tx_buf_off; 1456 io_req->rx_id = cqe->cqe_info.err_info.rx_id; 1457 rval = qedf_send_rec(io_req); 1458 /* 1459 * We only want to abort the io_req if we 1460 * can't queue the REC command as we want to 1461 * keep the exchange open for recovery. 1462 */ 1463 if (rval) 1464 goto send_abort; 1465 } 1466 return; 1467 } 1468 } 1469 1470 send_abort: 1471 init_completion(&io_req->abts_done); 1472 rval = qedf_initiate_abts(io_req, true); 1473 if (rval) 1474 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n"); 1475 } 1476 1477 /* Cleanup a command when we receive an error detection completion */ 1478 void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, 1479 struct qedf_ioreq *io_req) 1480 { 1481 int rval; 1482 1483 if (!cqe) 1484 return; 1485 1486 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, " 1487 "xid=0x%x\n", io_req->xid); 1488 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), 1489 "err_warn_bitmap=%08x:%08x\n", 1490 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi), 1491 le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo)); 1492 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, " 1493 "rx_buff_off=%08x, rx_id=%04x\n", 1494 le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off), 1495 le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off), 1496 le32_to_cpu(cqe->cqe_info.err_info.rx_id)); 1497 1498 if (qedf->stop_io_on_error) { 1499 qedf_stop_all_io(qedf); 1500 return; 1501 } 1502 1503 init_completion(&io_req->abts_done); 1504 rval = qedf_initiate_abts(io_req, true); 1505 if (rval) 1506 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n"); 1507 } 1508 1509 static void qedf_flush_els_req(struct qedf_ctx *qedf, 1510 struct qedf_ioreq *els_req) 1511 { 1512 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, 1513 "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid, 1514 kref_read(&els_req->refcount)); 1515 1516 /* 1517 * Need to distinguish this from a timeout when calling the 1518 * els_req->cb_func. 1519 */ 1520 els_req->event = QEDF_IOREQ_EV_ELS_FLUSH; 1521 1522 /* Cancel the timer */ 1523 cancel_delayed_work_sync(&els_req->timeout_work); 1524 1525 /* Call callback function to complete command */ 1526 if (els_req->cb_func && els_req->cb_arg) { 1527 els_req->cb_func(els_req->cb_arg); 1528 els_req->cb_arg = NULL; 1529 } 1530 1531 /* Release kref for original initiate_els */ 1532 kref_put(&els_req->refcount, qedf_release_cmd); 1533 } 1534 1535 /* A value of -1 for lun is a wild card that means flush all 1536 * active SCSI I/Os for the target. 1537 */ 1538 void qedf_flush_active_ios(struct qedf_rport *fcport, int lun) 1539 { 1540 struct qedf_ioreq *io_req; 1541 struct qedf_ctx *qedf; 1542 struct qedf_cmd_mgr *cmd_mgr; 1543 int i, rc; 1544 unsigned long flags; 1545 int flush_cnt = 0; 1546 int wait_cnt = 100; 1547 int refcount = 0; 1548 1549 if (!fcport) 1550 return; 1551 1552 /* Check that fcport is still offloaded */ 1553 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { 1554 QEDF_ERR(NULL, "fcport is no longer offloaded.\n"); 1555 return; 1556 } 1557 1558 qedf = fcport->qedf; 1559 1560 if (!qedf) { 1561 QEDF_ERR(NULL, "qedf is NULL.\n"); 1562 return; 1563 } 1564 1565 /* Only wait for all commands to be queued in the Upload context */ 1566 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) && 1567 (lun == -1)) { 1568 while (atomic_read(&fcport->ios_to_queue)) { 1569 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, 1570 "Waiting for %d I/Os to be queued\n", 1571 atomic_read(&fcport->ios_to_queue)); 1572 if (wait_cnt == 0) { 1573 QEDF_ERR(NULL, 1574 "%d IOs request could not be queued\n", 1575 atomic_read(&fcport->ios_to_queue)); 1576 } 1577 msleep(20); 1578 wait_cnt--; 1579 } 1580 } 1581 1582 cmd_mgr = qedf->cmd_mgr; 1583 1584 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, 1585 "Flush active i/o's num=0x%x fcport=0x%p port_id=0x%06x scsi_id=%d.\n", 1586 atomic_read(&fcport->num_active_ios), fcport, 1587 fcport->rdata->ids.port_id, fcport->rport->scsi_target_id); 1588 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Locking flush mutex.\n"); 1589 1590 mutex_lock(&qedf->flush_mutex); 1591 if (lun == -1) { 1592 set_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags); 1593 } else { 1594 set_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags); 1595 fcport->lun_reset_lun = lun; 1596 } 1597 1598 for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) { 1599 io_req = &cmd_mgr->cmds[i]; 1600 1601 if (!io_req) 1602 continue; 1603 if (!io_req->fcport) 1604 continue; 1605 1606 spin_lock_irqsave(&cmd_mgr->lock, flags); 1607 1608 if (io_req->alloc) { 1609 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) { 1610 if (io_req->cmd_type == QEDF_SCSI_CMD) 1611 QEDF_ERR(&qedf->dbg_ctx, 1612 "Allocated but not queued, xid=0x%x\n", 1613 io_req->xid); 1614 } 1615 spin_unlock_irqrestore(&cmd_mgr->lock, flags); 1616 } else { 1617 spin_unlock_irqrestore(&cmd_mgr->lock, flags); 1618 continue; 1619 } 1620 1621 if (io_req->fcport != fcport) 1622 continue; 1623 1624 /* In case of ABTS, CMD_OUTSTANDING is cleared on ABTS response, 1625 * but RRQ is still pending. 1626 * Workaround: Within qedf_send_rrq, we check if the fcport is 1627 * NULL, and we drop the ref on the io_req to clean it up. 1628 */ 1629 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) { 1630 refcount = kref_read(&io_req->refcount); 1631 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, 1632 "Not outstanding, xid=0x%x, cmd_type=%d refcount=%d.\n", 1633 io_req->xid, io_req->cmd_type, refcount); 1634 /* If RRQ work has been queue, try to cancel it and 1635 * free the io_req 1636 */ 1637 if (atomic_read(&io_req->state) == 1638 QEDFC_CMD_ST_RRQ_WAIT) { 1639 if (cancel_delayed_work_sync 1640 (&io_req->rrq_work)) { 1641 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, 1642 "Putting reference for pending RRQ work xid=0x%x.\n", 1643 io_req->xid); 1644 /* ID: 003 */ 1645 kref_put(&io_req->refcount, 1646 qedf_release_cmd); 1647 } 1648 } 1649 continue; 1650 } 1651 1652 /* Only consider flushing ELS during target reset */ 1653 if (io_req->cmd_type == QEDF_ELS && 1654 lun == -1) { 1655 rc = kref_get_unless_zero(&io_req->refcount); 1656 if (!rc) { 1657 QEDF_ERR(&(qedf->dbg_ctx), 1658 "Could not get kref for ELS io_req=0x%p xid=0x%x.\n", 1659 io_req, io_req->xid); 1660 continue; 1661 } 1662 flush_cnt++; 1663 qedf_flush_els_req(qedf, io_req); 1664 /* 1665 * Release the kref and go back to the top of the 1666 * loop. 1667 */ 1668 goto free_cmd; 1669 } 1670 1671 if (io_req->cmd_type == QEDF_ABTS) { 1672 /* ID: 004 */ 1673 rc = kref_get_unless_zero(&io_req->refcount); 1674 if (!rc) { 1675 QEDF_ERR(&(qedf->dbg_ctx), 1676 "Could not get kref for abort io_req=0x%p xid=0x%x.\n", 1677 io_req, io_req->xid); 1678 continue; 1679 } 1680 if (lun != -1 && io_req->lun != lun) 1681 goto free_cmd; 1682 1683 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, 1684 "Flushing abort xid=0x%x.\n", io_req->xid); 1685 1686 if (cancel_delayed_work_sync(&io_req->rrq_work)) { 1687 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, 1688 "Putting ref for cancelled RRQ work xid=0x%x.\n", 1689 io_req->xid); 1690 kref_put(&io_req->refcount, qedf_release_cmd); 1691 } 1692 1693 if (cancel_delayed_work_sync(&io_req->timeout_work)) { 1694 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, 1695 "Putting ref for cancelled tmo work xid=0x%x.\n", 1696 io_req->xid); 1697 qedf_initiate_cleanup(io_req, true); 1698 /* Notify eh_abort handler that ABTS is 1699 * complete 1700 */ 1701 complete(&io_req->abts_done); 1702 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags); 1703 /* ID: 002 */ 1704 kref_put(&io_req->refcount, qedf_release_cmd); 1705 } 1706 flush_cnt++; 1707 goto free_cmd; 1708 } 1709 1710 if (!io_req->sc_cmd) 1711 continue; 1712 if (!io_req->sc_cmd->device) { 1713 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, 1714 "Device backpointer NULL for sc_cmd=%p.\n", 1715 io_req->sc_cmd); 1716 /* Put reference for non-existent scsi_cmnd */ 1717 io_req->sc_cmd = NULL; 1718 qedf_initiate_cleanup(io_req, false); 1719 kref_put(&io_req->refcount, qedf_release_cmd); 1720 continue; 1721 } 1722 if (lun > -1) { 1723 if (io_req->lun != lun) 1724 continue; 1725 } 1726 1727 /* 1728 * Use kref_get_unless_zero in the unlikely case the command 1729 * we're about to flush was completed in the normal SCSI path 1730 */ 1731 rc = kref_get_unless_zero(&io_req->refcount); 1732 if (!rc) { 1733 QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for " 1734 "io_req=0x%p xid=0x%x\n", io_req, io_req->xid); 1735 continue; 1736 } 1737 1738 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, 1739 "Cleanup xid=0x%x.\n", io_req->xid); 1740 flush_cnt++; 1741 1742 /* Cleanup task and return I/O mid-layer */ 1743 qedf_initiate_cleanup(io_req, true); 1744 1745 free_cmd: 1746 kref_put(&io_req->refcount, qedf_release_cmd); /* ID: 004 */ 1747 } 1748 1749 wait_cnt = 60; 1750 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, 1751 "Flushed 0x%x I/Os, active=0x%x.\n", 1752 flush_cnt, atomic_read(&fcport->num_active_ios)); 1753 /* Only wait for all commands to complete in the Upload context */ 1754 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) && 1755 (lun == -1)) { 1756 while (atomic_read(&fcport->num_active_ios)) { 1757 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, 1758 "Flushed 0x%x I/Os, active=0x%x cnt=%d.\n", 1759 flush_cnt, 1760 atomic_read(&fcport->num_active_ios), 1761 wait_cnt); 1762 if (wait_cnt == 0) { 1763 QEDF_ERR(&qedf->dbg_ctx, 1764 "Flushed %d I/Os, active=%d.\n", 1765 flush_cnt, 1766 atomic_read(&fcport->num_active_ios)); 1767 for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) { 1768 io_req = &cmd_mgr->cmds[i]; 1769 if (io_req->fcport && 1770 io_req->fcport == fcport) { 1771 refcount = 1772 kref_read(&io_req->refcount); 1773 set_bit(QEDF_CMD_DIRTY, 1774 &io_req->flags); 1775 QEDF_ERR(&qedf->dbg_ctx, 1776 "Outstanding io_req =%p xid=0x%x flags=0x%lx, sc_cmd=%p refcount=%d cmd_type=%d.\n", 1777 io_req, io_req->xid, 1778 io_req->flags, 1779 io_req->sc_cmd, 1780 refcount, 1781 io_req->cmd_type); 1782 } 1783 } 1784 WARN_ON(1); 1785 break; 1786 } 1787 msleep(500); 1788 wait_cnt--; 1789 } 1790 } 1791 1792 clear_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags); 1793 clear_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags); 1794 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Unlocking flush mutex.\n"); 1795 mutex_unlock(&qedf->flush_mutex); 1796 } 1797 1798 /* 1799 * Initiate a ABTS middle path command. Note that we don't have to initialize 1800 * the task context for an ABTS task. 1801 */ 1802 int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts) 1803 { 1804 struct fc_lport *lport; 1805 struct qedf_rport *fcport = io_req->fcport; 1806 struct fc_rport_priv *rdata; 1807 struct qedf_ctx *qedf; 1808 u16 xid; 1809 u32 r_a_tov = 0; 1810 int rc = 0; 1811 unsigned long flags; 1812 struct fcoe_wqe *sqe; 1813 u16 sqe_idx; 1814 int refcount = 0; 1815 1816 /* Sanity check qedf_rport before dereferencing any pointers */ 1817 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { 1818 QEDF_ERR(NULL, "tgt not offloaded\n"); 1819 rc = 1; 1820 goto out; 1821 } 1822 1823 qedf = fcport->qedf; 1824 rdata = fcport->rdata; 1825 1826 if (!rdata || !kref_get_unless_zero(&rdata->kref)) { 1827 QEDF_ERR(&qedf->dbg_ctx, "stale rport\n"); 1828 rc = 1; 1829 goto out; 1830 } 1831 1832 r_a_tov = rdata->r_a_tov; 1833 lport = qedf->lport; 1834 1835 if (lport->state != LPORT_ST_READY || !(lport->link_up)) { 1836 QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n"); 1837 rc = 1; 1838 goto drop_rdata_kref; 1839 } 1840 1841 if (atomic_read(&qedf->link_down_tmo_valid) > 0) { 1842 QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n"); 1843 rc = 1; 1844 goto drop_rdata_kref; 1845 } 1846 1847 /* Ensure room on SQ */ 1848 if (!atomic_read(&fcport->free_sqes)) { 1849 QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n"); 1850 rc = 1; 1851 goto drop_rdata_kref; 1852 } 1853 1854 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { 1855 QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n"); 1856 rc = 1; 1857 goto drop_rdata_kref; 1858 } 1859 1860 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) || 1861 test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) || 1862 test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) { 1863 QEDF_ERR(&qedf->dbg_ctx, 1864 "io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n", 1865 io_req->xid, io_req->sc_cmd); 1866 rc = 1; 1867 goto drop_rdata_kref; 1868 } 1869 1870 kref_get(&io_req->refcount); 1871 1872 xid = io_req->xid; 1873 qedf->control_requests++; 1874 qedf->packet_aborts++; 1875 1876 /* Set the command type to abort */ 1877 io_req->cmd_type = QEDF_ABTS; 1878 io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts; 1879 1880 set_bit(QEDF_CMD_IN_ABORT, &io_req->flags); 1881 refcount = kref_read(&io_req->refcount); 1882 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM, 1883 "ABTS io_req xid = 0x%x refcount=%d\n", 1884 xid, refcount); 1885 1886 qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT); 1887 1888 spin_lock_irqsave(&fcport->rport_lock, flags); 1889 1890 sqe_idx = qedf_get_sqe_idx(fcport); 1891 sqe = &fcport->sq[sqe_idx]; 1892 memset(sqe, 0, sizeof(struct fcoe_wqe)); 1893 io_req->task_params->sqe = sqe; 1894 1895 init_initiator_abort_fcoe_task(io_req->task_params); 1896 qedf_ring_doorbell(fcport); 1897 1898 spin_unlock_irqrestore(&fcport->rport_lock, flags); 1899 1900 drop_rdata_kref: 1901 kref_put(&rdata->kref, fc_rport_destroy); 1902 out: 1903 return rc; 1904 } 1905 1906 void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, 1907 struct qedf_ioreq *io_req) 1908 { 1909 uint32_t r_ctl; 1910 uint16_t xid; 1911 int rc; 1912 struct qedf_rport *fcport = io_req->fcport; 1913 1914 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = " 1915 "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type); 1916 1917 xid = io_req->xid; 1918 r_ctl = cqe->cqe_info.abts_info.r_ctl; 1919 1920 /* This was added at a point when we were scheduling abts_compl & 1921 * cleanup_compl on different CPUs and there was a possibility of 1922 * the io_req to be freed from the other context before we got here. 1923 */ 1924 if (!fcport) { 1925 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, 1926 "Dropping ABTS completion xid=0x%x as fcport is NULL", 1927 io_req->xid); 1928 return; 1929 } 1930 1931 /* 1932 * When flush is active, let the cmds be completed from the cleanup 1933 * context 1934 */ 1935 if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) || 1936 test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) { 1937 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, 1938 "Dropping ABTS completion xid=0x%x as fcport is flushing", 1939 io_req->xid); 1940 return; 1941 } 1942 1943 if (!cancel_delayed_work(&io_req->timeout_work)) { 1944 QEDF_ERR(&qedf->dbg_ctx, 1945 "Wasn't able to cancel abts timeout work.\n"); 1946 } 1947 1948 switch (r_ctl) { 1949 case FC_RCTL_BA_ACC: 1950 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, 1951 "ABTS response - ACC Send RRQ after R_A_TOV\n"); 1952 io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS; 1953 rc = kref_get_unless_zero(&io_req->refcount); /* ID: 003 */ 1954 if (!rc) { 1955 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM, 1956 "kref is already zero so ABTS was already completed or flushed xid=0x%x.\n", 1957 io_req->xid); 1958 return; 1959 } 1960 /* 1961 * Dont release this cmd yet. It will be relesed 1962 * after we get RRQ response 1963 */ 1964 queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work, 1965 msecs_to_jiffies(qedf->lport->r_a_tov)); 1966 atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_WAIT); 1967 break; 1968 /* For error cases let the cleanup return the command */ 1969 case FC_RCTL_BA_RJT: 1970 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, 1971 "ABTS response - RJT\n"); 1972 io_req->event = QEDF_IOREQ_EV_ABORT_FAILED; 1973 break; 1974 default: 1975 QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n"); 1976 break; 1977 } 1978 1979 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags); 1980 1981 if (io_req->sc_cmd) { 1982 if (io_req->return_scsi_cmd_on_abts) 1983 qedf_scsi_done(qedf, io_req, DID_ERROR); 1984 } 1985 1986 /* Notify eh_abort handler that ABTS is complete */ 1987 complete(&io_req->abts_done); 1988 1989 kref_put(&io_req->refcount, qedf_release_cmd); 1990 } 1991 1992 int qedf_init_mp_req(struct qedf_ioreq *io_req) 1993 { 1994 struct qedf_mp_req *mp_req; 1995 struct scsi_sge *mp_req_bd; 1996 struct scsi_sge *mp_resp_bd; 1997 struct qedf_ctx *qedf = io_req->fcport->qedf; 1998 dma_addr_t addr; 1999 uint64_t sz; 2000 2001 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n"); 2002 2003 mp_req = (struct qedf_mp_req *)&(io_req->mp_req); 2004 memset(mp_req, 0, sizeof(struct qedf_mp_req)); 2005 2006 if (io_req->cmd_type != QEDF_ELS) { 2007 mp_req->req_len = sizeof(struct fcp_cmnd); 2008 io_req->data_xfer_len = mp_req->req_len; 2009 } else 2010 mp_req->req_len = io_req->data_xfer_len; 2011 2012 mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE, 2013 &mp_req->req_buf_dma, GFP_KERNEL); 2014 if (!mp_req->req_buf) { 2015 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n"); 2016 qedf_free_mp_resc(io_req); 2017 return -ENOMEM; 2018 } 2019 2020 mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev, 2021 QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL); 2022 if (!mp_req->resp_buf) { 2023 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp " 2024 "buffer\n"); 2025 qedf_free_mp_resc(io_req); 2026 return -ENOMEM; 2027 } 2028 2029 /* Allocate and map mp_req_bd and mp_resp_bd */ 2030 sz = sizeof(struct scsi_sge); 2031 mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz, 2032 &mp_req->mp_req_bd_dma, GFP_KERNEL); 2033 if (!mp_req->mp_req_bd) { 2034 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n"); 2035 qedf_free_mp_resc(io_req); 2036 return -ENOMEM; 2037 } 2038 2039 mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz, 2040 &mp_req->mp_resp_bd_dma, GFP_KERNEL); 2041 if (!mp_req->mp_resp_bd) { 2042 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n"); 2043 qedf_free_mp_resc(io_req); 2044 return -ENOMEM; 2045 } 2046 2047 /* Fill bd table */ 2048 addr = mp_req->req_buf_dma; 2049 mp_req_bd = mp_req->mp_req_bd; 2050 mp_req_bd->sge_addr.lo = U64_LO(addr); 2051 mp_req_bd->sge_addr.hi = U64_HI(addr); 2052 mp_req_bd->sge_len = QEDF_PAGE_SIZE; 2053 2054 /* 2055 * MP buffer is either a task mgmt command or an ELS. 2056 * So the assumption is that it consumes a single bd 2057 * entry in the bd table 2058 */ 2059 mp_resp_bd = mp_req->mp_resp_bd; 2060 addr = mp_req->resp_buf_dma; 2061 mp_resp_bd->sge_addr.lo = U64_LO(addr); 2062 mp_resp_bd->sge_addr.hi = U64_HI(addr); 2063 mp_resp_bd->sge_len = QEDF_PAGE_SIZE; 2064 2065 return 0; 2066 } 2067 2068 /* 2069 * Last ditch effort to clear the port if it's stuck. Used only after a 2070 * cleanup task times out. 2071 */ 2072 static void qedf_drain_request(struct qedf_ctx *qedf) 2073 { 2074 if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) { 2075 QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n"); 2076 return; 2077 } 2078 2079 /* Set bit to return all queuecommand requests as busy */ 2080 set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags); 2081 2082 /* Call qed drain request for function. Should be synchronous */ 2083 qed_ops->common->drain(qedf->cdev); 2084 2085 /* Settle time for CQEs to be returned */ 2086 msleep(100); 2087 2088 /* Unplug and continue */ 2089 clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags); 2090 } 2091 2092 /* 2093 * Returns SUCCESS if the cleanup task does not timeout, otherwise return 2094 * FAILURE. 2095 */ 2096 int qedf_initiate_cleanup(struct qedf_ioreq *io_req, 2097 bool return_scsi_cmd_on_abts) 2098 { 2099 struct qedf_rport *fcport; 2100 struct qedf_ctx *qedf; 2101 uint16_t xid; 2102 struct e4_fcoe_task_context *task; 2103 int tmo = 0; 2104 int rc = SUCCESS; 2105 unsigned long flags; 2106 struct fcoe_wqe *sqe; 2107 u16 sqe_idx; 2108 int refcount = 0; 2109 2110 fcport = io_req->fcport; 2111 if (!fcport) { 2112 QEDF_ERR(NULL, "fcport is NULL.\n"); 2113 return SUCCESS; 2114 } 2115 2116 /* Sanity check qedf_rport before dereferencing any pointers */ 2117 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { 2118 QEDF_ERR(NULL, "tgt not offloaded\n"); 2119 rc = 1; 2120 return SUCCESS; 2121 } 2122 2123 qedf = fcport->qedf; 2124 if (!qedf) { 2125 QEDF_ERR(NULL, "qedf is NULL.\n"); 2126 return SUCCESS; 2127 } 2128 2129 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) || 2130 test_and_set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) { 2131 QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in " 2132 "cleanup processing or already completed.\n", 2133 io_req->xid); 2134 return SUCCESS; 2135 } 2136 set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags); 2137 2138 /* Ensure room on SQ */ 2139 if (!atomic_read(&fcport->free_sqes)) { 2140 QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n"); 2141 /* Need to make sure we clear the flag since it was set */ 2142 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags); 2143 return FAILED; 2144 } 2145 2146 if (io_req->cmd_type == QEDF_CLEANUP) { 2147 QEDF_ERR(&qedf->dbg_ctx, 2148 "io_req=0x%x is already a cleanup command cmd_type=%d.\n", 2149 io_req->xid, io_req->cmd_type); 2150 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags); 2151 return SUCCESS; 2152 } 2153 2154 refcount = kref_read(&io_req->refcount); 2155 2156 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, 2157 "Entered xid=0x%x sc_cmd=%p cmd_type=%d flags=0x%lx refcount=%d fcport=%p port_id=0x%06x\n", 2158 io_req->xid, io_req->sc_cmd, io_req->cmd_type, io_req->flags, 2159 refcount, fcport, fcport->rdata->ids.port_id); 2160 2161 /* Cleanup cmds re-use the same TID as the original I/O */ 2162 xid = io_req->xid; 2163 io_req->cmd_type = QEDF_CLEANUP; 2164 io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts; 2165 2166 task = qedf_get_task_mem(&qedf->tasks, xid); 2167 2168 init_completion(&io_req->cleanup_done); 2169 2170 spin_lock_irqsave(&fcport->rport_lock, flags); 2171 2172 sqe_idx = qedf_get_sqe_idx(fcport); 2173 sqe = &fcport->sq[sqe_idx]; 2174 memset(sqe, 0, sizeof(struct fcoe_wqe)); 2175 io_req->task_params->sqe = sqe; 2176 2177 init_initiator_cleanup_fcoe_task(io_req->task_params); 2178 qedf_ring_doorbell(fcport); 2179 2180 spin_unlock_irqrestore(&fcport->rport_lock, flags); 2181 2182 tmo = wait_for_completion_timeout(&io_req->cleanup_done, 2183 QEDF_CLEANUP_TIMEOUT * HZ); 2184 2185 if (!tmo) { 2186 rc = FAILED; 2187 /* Timeout case */ 2188 QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, " 2189 "xid=%x.\n", io_req->xid); 2190 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags); 2191 /* Issue a drain request if cleanup task times out */ 2192 QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n"); 2193 qedf_drain_request(qedf); 2194 } 2195 2196 /* If it TASK MGMT handle it, reference will be decreased 2197 * in qedf_execute_tmf 2198 */ 2199 if (io_req->tm_flags == FCP_TMF_LUN_RESET || 2200 io_req->tm_flags == FCP_TMF_TGT_RESET) { 2201 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); 2202 io_req->sc_cmd = NULL; 2203 complete(&io_req->tm_done); 2204 } 2205 2206 if (io_req->sc_cmd) { 2207 if (io_req->return_scsi_cmd_on_abts) 2208 qedf_scsi_done(qedf, io_req, DID_ERROR); 2209 } 2210 2211 if (rc == SUCCESS) 2212 io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS; 2213 else 2214 io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED; 2215 2216 return rc; 2217 } 2218 2219 void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, 2220 struct qedf_ioreq *io_req) 2221 { 2222 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n", 2223 io_req->xid); 2224 2225 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags); 2226 2227 /* Complete so we can finish cleaning up the I/O */ 2228 complete(&io_req->cleanup_done); 2229 } 2230 2231 static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd, 2232 uint8_t tm_flags) 2233 { 2234 struct qedf_ioreq *io_req; 2235 struct e4_fcoe_task_context *task; 2236 struct qedf_ctx *qedf = fcport->qedf; 2237 struct fc_lport *lport = qedf->lport; 2238 int rc = 0; 2239 uint16_t xid; 2240 int tmo = 0; 2241 int lun = 0; 2242 unsigned long flags; 2243 struct fcoe_wqe *sqe; 2244 u16 sqe_idx; 2245 2246 if (!sc_cmd) { 2247 QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n"); 2248 return FAILED; 2249 } 2250 2251 lun = (int)sc_cmd->device->lun; 2252 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { 2253 QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n"); 2254 rc = FAILED; 2255 goto no_flush; 2256 } 2257 2258 io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD); 2259 if (!io_req) { 2260 QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF"); 2261 rc = -EAGAIN; 2262 goto no_flush; 2263 } 2264 2265 if (tm_flags == FCP_TMF_LUN_RESET) 2266 qedf->lun_resets++; 2267 else if (tm_flags == FCP_TMF_TGT_RESET) 2268 qedf->target_resets++; 2269 2270 /* Initialize rest of io_req fields */ 2271 io_req->sc_cmd = sc_cmd; 2272 io_req->fcport = fcport; 2273 io_req->cmd_type = QEDF_TASK_MGMT_CMD; 2274 2275 /* Record which cpu this request is associated with */ 2276 io_req->cpu = smp_processor_id(); 2277 2278 /* Set TM flags */ 2279 io_req->io_req_flags = QEDF_READ; 2280 io_req->data_xfer_len = 0; 2281 io_req->tm_flags = tm_flags; 2282 2283 /* Default is to return a SCSI command when an error occurs */ 2284 io_req->return_scsi_cmd_on_abts = false; 2285 2286 /* Obtain exchange id */ 2287 xid = io_req->xid; 2288 2289 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = " 2290 "0x%x\n", xid); 2291 2292 /* Initialize task context for this IO request */ 2293 task = qedf_get_task_mem(&qedf->tasks, xid); 2294 2295 init_completion(&io_req->tm_done); 2296 2297 spin_lock_irqsave(&fcport->rport_lock, flags); 2298 2299 sqe_idx = qedf_get_sqe_idx(fcport); 2300 sqe = &fcport->sq[sqe_idx]; 2301 memset(sqe, 0, sizeof(struct fcoe_wqe)); 2302 2303 qedf_init_task(fcport, lport, io_req, task, sqe); 2304 qedf_ring_doorbell(fcport); 2305 2306 spin_unlock_irqrestore(&fcport->rport_lock, flags); 2307 2308 set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); 2309 tmo = wait_for_completion_timeout(&io_req->tm_done, 2310 QEDF_TM_TIMEOUT * HZ); 2311 2312 if (!tmo) { 2313 rc = FAILED; 2314 QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n"); 2315 /* Clear outstanding bit since command timed out */ 2316 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); 2317 io_req->sc_cmd = NULL; 2318 } else { 2319 /* Check TMF response code */ 2320 if (io_req->fcp_rsp_code == 0) 2321 rc = SUCCESS; 2322 else 2323 rc = FAILED; 2324 } 2325 /* 2326 * Double check that fcport has not gone into an uploading state before 2327 * executing the command flush for the LUN/target. 2328 */ 2329 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { 2330 QEDF_ERR(&qedf->dbg_ctx, 2331 "fcport is uploading, not executing flush.\n"); 2332 goto no_flush; 2333 } 2334 /* We do not need this io_req any more */ 2335 kref_put(&io_req->refcount, qedf_release_cmd); 2336 2337 2338 if (tm_flags == FCP_TMF_LUN_RESET) 2339 qedf_flush_active_ios(fcport, lun); 2340 else 2341 qedf_flush_active_ios(fcport, -1); 2342 2343 no_flush: 2344 if (rc != SUCCESS) { 2345 QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n"); 2346 rc = FAILED; 2347 } else { 2348 QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n"); 2349 rc = SUCCESS; 2350 } 2351 return rc; 2352 } 2353 2354 int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags) 2355 { 2356 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); 2357 struct fc_rport_libfc_priv *rp = rport->dd_data; 2358 struct qedf_rport *fcport = (struct qedf_rport *)&rp[1]; 2359 struct qedf_ctx *qedf; 2360 struct fc_lport *lport = shost_priv(sc_cmd->device->host); 2361 int rc = SUCCESS; 2362 int rval; 2363 struct qedf_ioreq *io_req = NULL; 2364 int ref_cnt = 0; 2365 struct fc_rport_priv *rdata = fcport->rdata; 2366 2367 QEDF_ERR(NULL, 2368 "tm_flags 0x%x sc_cmd %p op = 0x%02x target_id = 0x%x lun=%d\n", 2369 tm_flags, sc_cmd, sc_cmd->cmnd[0], rport->scsi_target_id, 2370 (int)sc_cmd->device->lun); 2371 2372 if (!rdata || !kref_get_unless_zero(&rdata->kref)) { 2373 QEDF_ERR(NULL, "stale rport\n"); 2374 return FAILED; 2375 } 2376 2377 QEDF_ERR(NULL, "portid=%06x tm_flags =%s\n", rdata->ids.port_id, 2378 (tm_flags == FCP_TMF_TGT_RESET) ? "TARGET RESET" : 2379 "LUN RESET"); 2380 2381 if (sc_cmd->SCp.ptr) { 2382 io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr; 2383 ref_cnt = kref_read(&io_req->refcount); 2384 QEDF_ERR(NULL, 2385 "orig io_req = %p xid = 0x%x ref_cnt = %d.\n", 2386 io_req, io_req->xid, ref_cnt); 2387 } 2388 2389 rval = fc_remote_port_chkready(rport); 2390 if (rval) { 2391 QEDF_ERR(NULL, "device_reset rport not ready\n"); 2392 rc = FAILED; 2393 goto tmf_err; 2394 } 2395 2396 rc = fc_block_scsi_eh(sc_cmd); 2397 if (rc) 2398 goto tmf_err; 2399 2400 if (!fcport) { 2401 QEDF_ERR(NULL, "device_reset: rport is NULL\n"); 2402 rc = FAILED; 2403 goto tmf_err; 2404 } 2405 2406 qedf = fcport->qedf; 2407 2408 if (!qedf) { 2409 QEDF_ERR(NULL, "qedf is NULL.\n"); 2410 rc = FAILED; 2411 goto tmf_err; 2412 } 2413 2414 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { 2415 QEDF_ERR(&qedf->dbg_ctx, "Connection is getting uploaded.\n"); 2416 rc = SUCCESS; 2417 goto tmf_err; 2418 } 2419 2420 if (test_bit(QEDF_UNLOADING, &qedf->flags) || 2421 test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) { 2422 rc = SUCCESS; 2423 goto tmf_err; 2424 } 2425 2426 if (lport->state != LPORT_ST_READY || !(lport->link_up)) { 2427 QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n"); 2428 rc = FAILED; 2429 goto tmf_err; 2430 } 2431 2432 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { 2433 if (!fcport->rdata) 2434 QEDF_ERR(&qedf->dbg_ctx, "fcport %p is uploading.\n", 2435 fcport); 2436 else 2437 QEDF_ERR(&qedf->dbg_ctx, 2438 "fcport %p port_id=%06x is uploading.\n", 2439 fcport, fcport->rdata->ids.port_id); 2440 rc = FAILED; 2441 goto tmf_err; 2442 } 2443 2444 rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags); 2445 2446 tmf_err: 2447 kref_put(&rdata->kref, fc_rport_destroy); 2448 return rc; 2449 } 2450 2451 void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, 2452 struct qedf_ioreq *io_req) 2453 { 2454 struct fcoe_cqe_rsp_info *fcp_rsp; 2455 2456 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); 2457 2458 fcp_rsp = &cqe->cqe_info.rsp_info; 2459 qedf_parse_fcp_rsp(io_req, fcp_rsp); 2460 2461 io_req->sc_cmd = NULL; 2462 complete(&io_req->tm_done); 2463 } 2464 2465 void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx, 2466 struct fcoe_cqe *cqe) 2467 { 2468 unsigned long flags; 2469 uint16_t tmp; 2470 uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len; 2471 u32 payload_len, crc; 2472 struct fc_frame_header *fh; 2473 struct fc_frame *fp; 2474 struct qedf_io_work *io_work; 2475 u32 bdq_idx; 2476 void *bdq_addr; 2477 struct scsi_bd *p_bd_info; 2478 2479 p_bd_info = &cqe->cqe_info.unsolic_info.bd_info; 2480 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL, 2481 "address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n", 2482 le32_to_cpu(p_bd_info->address.hi), 2483 le32_to_cpu(p_bd_info->address.lo), 2484 le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi), 2485 le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo), 2486 qedf->bdq_prod_idx, pktlen); 2487 2488 bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo); 2489 if (bdq_idx >= QEDF_BDQ_SIZE) { 2490 QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n", 2491 bdq_idx); 2492 goto increment_prod; 2493 } 2494 2495 bdq_addr = qedf->bdq[bdq_idx].buf_addr; 2496 if (!bdq_addr) { 2497 QEDF_ERR(&(qedf->dbg_ctx), "bdq_addr is NULL, dropping " 2498 "unsolicited packet.\n"); 2499 goto increment_prod; 2500 } 2501 2502 if (qedf_dump_frames) { 2503 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL, 2504 "BDQ frame is at addr=%p.\n", bdq_addr); 2505 print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1, 2506 (void *)bdq_addr, pktlen, false); 2507 } 2508 2509 /* Allocate frame */ 2510 payload_len = pktlen - sizeof(struct fc_frame_header); 2511 fp = fc_frame_alloc(qedf->lport, payload_len); 2512 if (!fp) { 2513 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n"); 2514 goto increment_prod; 2515 } 2516 2517 /* Copy data from BDQ buffer into fc_frame struct */ 2518 fh = (struct fc_frame_header *)fc_frame_header_get(fp); 2519 memcpy(fh, (void *)bdq_addr, pktlen); 2520 2521 /* Initialize the frame so libfc sees it as a valid frame */ 2522 crc = fcoe_fc_crc(fp); 2523 fc_frame_init(fp); 2524 fr_dev(fp) = qedf->lport; 2525 fr_sof(fp) = FC_SOF_I3; 2526 fr_eof(fp) = FC_EOF_T; 2527 fr_crc(fp) = cpu_to_le32(~crc); 2528 2529 /* 2530 * We need to return the frame back up to libfc in a non-atomic 2531 * context 2532 */ 2533 io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC); 2534 if (!io_work) { 2535 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate " 2536 "work for I/O completion.\n"); 2537 fc_frame_free(fp); 2538 goto increment_prod; 2539 } 2540 memset(io_work, 0, sizeof(struct qedf_io_work)); 2541 2542 INIT_WORK(&io_work->work, qedf_fp_io_handler); 2543 2544 /* Copy contents of CQE for deferred processing */ 2545 memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe)); 2546 2547 io_work->qedf = qedf; 2548 io_work->fp = fp; 2549 2550 queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work); 2551 increment_prod: 2552 spin_lock_irqsave(&qedf->hba_lock, flags); 2553 2554 /* Increment producer to let f/w know we've handled the frame */ 2555 qedf->bdq_prod_idx++; 2556 2557 /* Producer index wraps at uint16_t boundary */ 2558 if (qedf->bdq_prod_idx == 0xffff) 2559 qedf->bdq_prod_idx = 0; 2560 2561 writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod); 2562 tmp = readw(qedf->bdq_primary_prod); 2563 writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod); 2564 tmp = readw(qedf->bdq_secondary_prod); 2565 2566 spin_unlock_irqrestore(&qedf->hba_lock, flags); 2567 } 2568