1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 ********************************************************************/ 23 #include <linux/pci.h> 24 #include <linux/slab.h> 25 #include <linux/interrupt.h> 26 #include <linux/delay.h> 27 #include <asm/unaligned.h> 28 #include <linux/crc-t10dif.h> 29 #include <net/checksum.h> 30 31 #include <scsi/scsi.h> 32 #include <scsi/scsi_device.h> 33 #include <scsi/scsi_eh.h> 34 #include <scsi/scsi_host.h> 35 #include <scsi/scsi_tcq.h> 36 #include <scsi/scsi_transport_fc.h> 37 #include <scsi/fc/fc_fs.h> 38 39 #include "lpfc_version.h" 40 #include "lpfc_hw4.h" 41 #include "lpfc_hw.h" 42 #include "lpfc_sli.h" 43 #include "lpfc_sli4.h" 44 #include "lpfc_nl.h" 45 #include "lpfc_disc.h" 46 #include "lpfc.h" 47 #include "lpfc_nvme.h" 48 #include "lpfc_scsi.h" 49 #include "lpfc_logmsg.h" 50 #include "lpfc_crtn.h" 51 #include "lpfc_vport.h" 52 #include "lpfc_debugfs.h" 53 54 /* NVME initiator-based functions */ 55 56 static struct lpfc_io_buf * 57 lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 58 int idx, int expedite); 59 60 static void 61 lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *); 62 63 static struct nvme_fc_port_template lpfc_nvme_template; 64 65 /** 66 * lpfc_nvme_create_queue - 67 * @pnvme_lport: Transport localport that LS is to be issued from 68 * @qidx: An cpu index used to affinitize IO queues and MSIX vectors. 69 * @qsize: Size of the queue in bytes 70 * @handle: An opaque driver handle used in follow-up calls. 71 * 72 * Driver registers this routine to preallocate and initialize any 73 * internal data structures to bind the @qidx to its internal IO queues. 74 * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ. 75 * 76 * Return value : 77 * 0 - Success 78 * -EINVAL - Unsupported input value. 79 * -ENOMEM - Could not alloc necessary memory 80 **/ 81 static int 82 lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport, 83 unsigned int qidx, u16 qsize, 84 void **handle) 85 { 86 struct lpfc_nvme_lport *lport; 87 struct lpfc_vport *vport; 88 struct lpfc_nvme_qhandle *qhandle; 89 char *str; 90 91 if (!pnvme_lport->private) 92 return -ENOMEM; 93 94 lport = (struct lpfc_nvme_lport *)pnvme_lport->private; 95 vport = lport->vport; 96 97 if (!vport || vport->load_flag & FC_UNLOADING || 98 vport->phba->hba_flag & HBA_IOQ_FLUSH) 99 return -ENODEV; 100 101 qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL); 102 if (qhandle == NULL) 103 return -ENOMEM; 104 105 qhandle->cpu_id = raw_smp_processor_id(); 106 qhandle->qidx = qidx; 107 /* 108 * NVME qidx == 0 is the admin queue, so both admin queue 109 * and first IO queue will use MSI-X vector and associated 110 * EQ/CQ/WQ at index 0. After that they are sequentially assigned. 111 */ 112 if (qidx) { 113 str = "IO "; /* IO queue */ 114 qhandle->index = ((qidx - 1) % 115 lpfc_nvme_template.max_hw_queues); 116 } else { 117 str = "ADM"; /* Admin queue */ 118 qhandle->index = qidx; 119 } 120 121 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, 122 "6073 Binding %s HdwQueue %d (cpu %d) to " 123 "hdw_queue %d qhandle x%px\n", str, 124 qidx, qhandle->cpu_id, qhandle->index, qhandle); 125 *handle = (void *)qhandle; 126 return 0; 127 } 128 129 /** 130 * lpfc_nvme_delete_queue - 131 * @pnvme_lport: Transport localport that LS is to be issued from 132 * @qidx: An cpu index used to affinitize IO queues and MSIX vectors. 133 * @handle: An opaque driver handle from lpfc_nvme_create_queue 134 * 135 * Driver registers this routine to free 136 * any internal data structures to bind the @qidx to its internal 137 * IO queues. 138 * 139 * Return value : 140 * 0 - Success 141 * TODO: What are the failure codes. 142 **/ 143 static void 144 lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport, 145 unsigned int qidx, 146 void *handle) 147 { 148 struct lpfc_nvme_lport *lport; 149 struct lpfc_vport *vport; 150 151 if (!pnvme_lport->private) 152 return; 153 154 lport = (struct lpfc_nvme_lport *)pnvme_lport->private; 155 vport = lport->vport; 156 157 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, 158 "6001 ENTER. lpfc_pnvme x%px, qidx x%x qhandle x%px\n", 159 lport, qidx, handle); 160 kfree(handle); 161 } 162 163 static void 164 lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport) 165 { 166 struct lpfc_nvme_lport *lport = localport->private; 167 168 lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME, 169 "6173 localport x%px delete complete\n", 170 lport); 171 172 /* release any threads waiting for the unreg to complete */ 173 if (lport->vport->localport) 174 complete(lport->lport_unreg_cmp); 175 } 176 177 /* lpfc_nvme_remoteport_delete 178 * 179 * @remoteport: Pointer to an nvme transport remoteport instance. 180 * 181 * This is a template downcall. NVME transport calls this function 182 * when it has completed the unregistration of a previously 183 * registered remoteport. 184 * 185 * Return value : 186 * None 187 */ 188 static void 189 lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport) 190 { 191 struct lpfc_nvme_rport *rport = remoteport->private; 192 struct lpfc_vport *vport; 193 struct lpfc_nodelist *ndlp; 194 u32 fc4_xpt_flags; 195 196 ndlp = rport->ndlp; 197 if (!ndlp) { 198 pr_err("**** %s: NULL ndlp on rport x%px remoteport x%px\n", 199 __func__, rport, remoteport); 200 goto rport_err; 201 } 202 203 vport = ndlp->vport; 204 if (!vport) { 205 pr_err("**** %s: Null vport on ndlp x%px, ste x%x rport x%px\n", 206 __func__, ndlp, ndlp->nlp_state, rport); 207 goto rport_err; 208 } 209 210 fc4_xpt_flags = NVME_XPT_REGD | SCSI_XPT_REGD; 211 212 /* Remove this rport from the lport's list - memory is owned by the 213 * transport. Remove the ndlp reference for the NVME transport before 214 * calling state machine to remove the node. 215 */ 216 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 217 "6146 remoteport delete of remoteport x%px, ndlp x%px " 218 "DID x%x xflags x%x\n", 219 remoteport, ndlp, ndlp->nlp_DID, ndlp->fc4_xpt_flags); 220 spin_lock_irq(&ndlp->lock); 221 222 /* The register rebind might have occurred before the delete 223 * downcall. Guard against this race. 224 */ 225 if (ndlp->fc4_xpt_flags & NVME_XPT_UNREG_WAIT) 226 ndlp->fc4_xpt_flags &= ~(NVME_XPT_UNREG_WAIT | NVME_XPT_REGD); 227 228 spin_unlock_irq(&ndlp->lock); 229 230 /* On a devloss timeout event, one more put is executed provided the 231 * NVME and SCSI rport unregister requests are complete. If the vport 232 * is unloading, this extra put is executed by lpfc_drop_node. 233 */ 234 if (!(ndlp->fc4_xpt_flags & fc4_xpt_flags)) 235 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 236 237 rport_err: 238 return; 239 } 240 241 /** 242 * lpfc_nvme_handle_lsreq - Process an unsolicited NVME LS request 243 * @phba: pointer to lpfc hba data structure. 244 * @axchg: pointer to exchange context for the NVME LS request 245 * 246 * This routine is used for processing an asychronously received NVME LS 247 * request. Any remaining validation is done and the LS is then forwarded 248 * to the nvme-fc transport via nvme_fc_rcv_ls_req(). 249 * 250 * The calling sequence should be: nvme_fc_rcv_ls_req() -> (processing) 251 * -> lpfc_nvme_xmt_ls_rsp/cmp -> req->done. 252 * __lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg. 253 * 254 * Returns 0 if LS was handled and delivered to the transport 255 * Returns 1 if LS failed to be handled and should be dropped 256 */ 257 int 258 lpfc_nvme_handle_lsreq(struct lpfc_hba *phba, 259 struct lpfc_async_xchg_ctx *axchg) 260 { 261 #if (IS_ENABLED(CONFIG_NVME_FC)) 262 struct lpfc_vport *vport; 263 struct lpfc_nvme_rport *lpfc_rport; 264 struct nvme_fc_remote_port *remoteport; 265 struct lpfc_nvme_lport *lport; 266 uint32_t *payload = axchg->payload; 267 int rc; 268 269 vport = axchg->ndlp->vport; 270 lpfc_rport = axchg->ndlp->nrport; 271 if (!lpfc_rport) 272 return -EINVAL; 273 274 remoteport = lpfc_rport->remoteport; 275 if (!vport->localport || 276 vport->phba->hba_flag & HBA_IOQ_FLUSH) 277 return -EINVAL; 278 279 lport = vport->localport->private; 280 if (!lport) 281 return -EINVAL; 282 283 rc = nvme_fc_rcv_ls_req(remoteport, &axchg->ls_rsp, axchg->payload, 284 axchg->size); 285 286 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, 287 "6205 NVME Unsol rcv: sz %d rc %d: %08x %08x %08x " 288 "%08x %08x %08x\n", 289 axchg->size, rc, 290 *payload, *(payload+1), *(payload+2), 291 *(payload+3), *(payload+4), *(payload+5)); 292 293 if (!rc) 294 return 0; 295 #endif 296 return 1; 297 } 298 299 /** 300 * __lpfc_nvme_ls_req_cmp - Generic completion handler for a NVME 301 * LS request. 302 * @phba: Pointer to HBA context object 303 * @vport: The local port that issued the LS 304 * @cmdwqe: Pointer to driver command WQE object. 305 * @wcqe: Pointer to driver response CQE object. 306 * 307 * This function is the generic completion handler for NVME LS requests. 308 * The function updates any states and statistics, calls the transport 309 * ls_req done() routine, then tears down the command and buffers used 310 * for the LS request. 311 **/ 312 void 313 __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport, 314 struct lpfc_iocbq *cmdwqe, 315 struct lpfc_wcqe_complete *wcqe) 316 { 317 struct nvmefc_ls_req *pnvme_lsreq; 318 struct lpfc_dmabuf *buf_ptr; 319 struct lpfc_nodelist *ndlp; 320 uint32_t status; 321 322 pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2; 323 ndlp = (struct lpfc_nodelist *)cmdwqe->context1; 324 status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK; 325 326 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 327 "6047 NVMEx LS REQ x%px cmpl DID %x Xri: %x " 328 "status %x reason x%x cmd:x%px lsreg:x%px bmp:x%px " 329 "ndlp:x%px\n", 330 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0, 331 cmdwqe->sli4_xritag, status, 332 (wcqe->parameter & 0xffff), 333 cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp); 334 335 lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x parm x%x\n", 336 cmdwqe->sli4_xritag, status, wcqe->parameter); 337 338 if (cmdwqe->context3) { 339 buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3; 340 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 341 kfree(buf_ptr); 342 cmdwqe->context3 = NULL; 343 } 344 if (pnvme_lsreq->done) 345 pnvme_lsreq->done(pnvme_lsreq, status); 346 else 347 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 348 "6046 NVMEx cmpl without done call back? " 349 "Data x%px DID %x Xri: %x status %x\n", 350 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0, 351 cmdwqe->sli4_xritag, status); 352 if (ndlp) { 353 lpfc_nlp_put(ndlp); 354 cmdwqe->context1 = NULL; 355 } 356 lpfc_sli_release_iocbq(phba, cmdwqe); 357 } 358 359 static void 360 lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, 361 struct lpfc_iocbq *rspwqe) 362 { 363 struct lpfc_vport *vport = cmdwqe->vport; 364 struct lpfc_nvme_lport *lport; 365 uint32_t status; 366 struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; 367 368 status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK; 369 370 if (vport->localport) { 371 lport = (struct lpfc_nvme_lport *)vport->localport->private; 372 if (lport) { 373 atomic_inc(&lport->fc4NvmeLsCmpls); 374 if (status) { 375 if (bf_get(lpfc_wcqe_c_xb, wcqe)) 376 atomic_inc(&lport->cmpl_ls_xb); 377 atomic_inc(&lport->cmpl_ls_err); 378 } 379 } 380 } 381 382 __lpfc_nvme_ls_req_cmp(phba, vport, cmdwqe, wcqe); 383 } 384 385 static int 386 lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, 387 struct lpfc_dmabuf *inp, 388 struct nvmefc_ls_req *pnvme_lsreq, 389 void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *, 390 struct lpfc_iocbq *), 391 struct lpfc_nodelist *ndlp, uint32_t num_entry, 392 uint32_t tmo, uint8_t retry) 393 { 394 struct lpfc_hba *phba = vport->phba; 395 union lpfc_wqe128 *wqe; 396 struct lpfc_iocbq *genwqe; 397 struct ulp_bde64 *bpl; 398 struct ulp_bde64 bde; 399 int i, rc, xmit_len, first_len; 400 401 /* Allocate buffer for command WQE */ 402 genwqe = lpfc_sli_get_iocbq(phba); 403 if (genwqe == NULL) 404 return 1; 405 406 wqe = &genwqe->wqe; 407 /* Initialize only 64 bytes */ 408 memset(wqe, 0, sizeof(union lpfc_wqe)); 409 410 genwqe->context3 = (uint8_t *)bmp; 411 genwqe->cmd_flag |= LPFC_IO_NVME_LS; 412 413 /* Save for completion so we can release these resources */ 414 genwqe->context1 = lpfc_nlp_get(ndlp); 415 if (!genwqe->context1) { 416 dev_warn(&phba->pcidev->dev, 417 "Warning: Failed node ref, not sending LS_REQ\n"); 418 lpfc_sli_release_iocbq(phba, genwqe); 419 return 1; 420 } 421 422 genwqe->context2 = (uint8_t *)pnvme_lsreq; 423 /* Fill in payload, bp points to frame payload */ 424 425 if (!tmo) 426 /* FC spec states we need 3 * ratov for CT requests */ 427 tmo = (3 * phba->fc_ratov); 428 429 /* For this command calculate the xmit length of the request bde. */ 430 xmit_len = 0; 431 first_len = 0; 432 bpl = (struct ulp_bde64 *)bmp->virt; 433 for (i = 0; i < num_entry; i++) { 434 bde.tus.w = bpl[i].tus.w; 435 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 436 break; 437 xmit_len += bde.tus.f.bdeSize; 438 if (i == 0) 439 first_len = xmit_len; 440 } 441 442 genwqe->num_bdes = num_entry; 443 genwqe->hba_wqidx = 0; 444 445 /* Words 0 - 2 */ 446 wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; 447 wqe->generic.bde.tus.f.bdeSize = first_len; 448 wqe->generic.bde.addrLow = bpl[0].addrLow; 449 wqe->generic.bde.addrHigh = bpl[0].addrHigh; 450 451 /* Word 3 */ 452 wqe->gen_req.request_payload_len = first_len; 453 454 /* Word 4 */ 455 456 /* Word 5 */ 457 bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0); 458 bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1); 459 bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1); 460 bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ); 461 bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME); 462 463 /* Word 6 */ 464 bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com, 465 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 466 bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag); 467 468 /* Word 7 */ 469 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, tmo); 470 bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3); 471 bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE); 472 bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI); 473 474 /* Word 8 */ 475 wqe->gen_req.wqe_com.abort_tag = genwqe->iotag; 476 477 /* Word 9 */ 478 bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag); 479 480 /* Word 10 */ 481 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); 482 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); 483 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); 484 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); 485 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); 486 487 /* Word 11 */ 488 bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 489 bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND); 490 491 492 /* Issue GEN REQ WQE for NPORT <did> */ 493 genwqe->cmd_cmpl = cmpl; 494 genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT; 495 genwqe->vport = vport; 496 genwqe->retry = retry; 497 498 lpfc_nvmeio_data(phba, "NVME LS XMIT: xri x%x iotag x%x to x%06x\n", 499 genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID); 500 501 rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe); 502 if (rc) { 503 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 504 "6045 Issue GEN REQ WQE to NPORT x%x " 505 "Data: x%x x%x rc x%x\n", 506 ndlp->nlp_DID, genwqe->iotag, 507 vport->port_state, rc); 508 lpfc_nlp_put(ndlp); 509 lpfc_sli_release_iocbq(phba, genwqe); 510 return 1; 511 } 512 513 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_ELS, 514 "6050 Issue GEN REQ WQE to NPORT x%x " 515 "Data: oxid: x%x state: x%x wq:x%px lsreq:x%px " 516 "bmp:x%px xmit:%d 1st:%d\n", 517 ndlp->nlp_DID, genwqe->sli4_xritag, 518 vport->port_state, 519 genwqe, pnvme_lsreq, bmp, xmit_len, first_len); 520 return 0; 521 } 522 523 524 /** 525 * __lpfc_nvme_ls_req - Generic service routine to issue an NVME LS request 526 * @vport: The local port issuing the LS 527 * @ndlp: The remote port to send the LS to 528 * @pnvme_lsreq: Pointer to LS request structure from the transport 529 * @gen_req_cmp: Completion call-back 530 * 531 * Routine validates the ndlp, builds buffers and sends a GEN_REQUEST 532 * WQE to perform the LS operation. 533 * 534 * Return value : 535 * 0 - Success 536 * non-zero: various error codes, in form of -Exxx 537 **/ 538 int 539 __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 540 struct nvmefc_ls_req *pnvme_lsreq, 541 void (*gen_req_cmp)(struct lpfc_hba *phba, 542 struct lpfc_iocbq *cmdwqe, 543 struct lpfc_iocbq *rspwqe)) 544 { 545 struct lpfc_dmabuf *bmp; 546 struct ulp_bde64 *bpl; 547 int ret; 548 uint16_t ntype, nstate; 549 550 if (!ndlp) { 551 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 552 "6051 NVMEx LS REQ: Bad NDLP x%px, Failing " 553 "LS Req\n", 554 ndlp); 555 return -ENODEV; 556 } 557 558 ntype = ndlp->nlp_type; 559 nstate = ndlp->nlp_state; 560 if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) || 561 (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) { 562 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 563 "6088 NVMEx LS REQ: Fail DID x%06x not " 564 "ready for IO. Type x%x, State x%x\n", 565 ndlp->nlp_DID, ntype, nstate); 566 return -ENODEV; 567 } 568 if (vport->phba->hba_flag & HBA_IOQ_FLUSH) 569 return -ENODEV; 570 571 if (!vport->phba->sli4_hba.nvmels_wq) 572 return -ENOMEM; 573 574 /* 575 * there are two dma buf in the request, actually there is one and 576 * the second one is just the start address + cmd size. 577 * Before calling lpfc_nvme_gen_req these buffers need to be wrapped 578 * in a lpfc_dmabuf struct. When freeing we just free the wrapper 579 * because the nvem layer owns the data bufs. 580 * We do not have to break these packets open, we don't care what is 581 * in them. And we do not have to look at the resonse data, we only 582 * care that we got a response. All of the caring is going to happen 583 * in the nvme-fc layer. 584 */ 585 586 bmp = kmalloc(sizeof(*bmp), GFP_KERNEL); 587 if (!bmp) { 588 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 589 "6044 NVMEx LS REQ: Could not alloc LS buf " 590 "for DID %x\n", 591 ndlp->nlp_DID); 592 return -ENOMEM; 593 } 594 595 bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys)); 596 if (!bmp->virt) { 597 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 598 "6042 NVMEx LS REQ: Could not alloc mbuf " 599 "for DID %x\n", 600 ndlp->nlp_DID); 601 kfree(bmp); 602 return -ENOMEM; 603 } 604 605 INIT_LIST_HEAD(&bmp->list); 606 607 bpl = (struct ulp_bde64 *)bmp->virt; 608 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma)); 609 bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma)); 610 bpl->tus.f.bdeFlags = 0; 611 bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen; 612 bpl->tus.w = le32_to_cpu(bpl->tus.w); 613 bpl++; 614 615 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma)); 616 bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma)); 617 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; 618 bpl->tus.f.bdeSize = pnvme_lsreq->rsplen; 619 bpl->tus.w = le32_to_cpu(bpl->tus.w); 620 621 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 622 "6149 NVMEx LS REQ: Issue to DID 0x%06x lsreq x%px, " 623 "rqstlen:%d rsplen:%d %pad %pad\n", 624 ndlp->nlp_DID, pnvme_lsreq, pnvme_lsreq->rqstlen, 625 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma, 626 &pnvme_lsreq->rspdma); 627 628 ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr, 629 pnvme_lsreq, gen_req_cmp, ndlp, 2, 630 pnvme_lsreq->timeout, 0); 631 if (ret != WQE_SUCCESS) { 632 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 633 "6052 NVMEx REQ: EXIT. issue ls wqe failed " 634 "lsreq x%px Status %x DID %x\n", 635 pnvme_lsreq, ret, ndlp->nlp_DID); 636 lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys); 637 kfree(bmp); 638 return -EIO; 639 } 640 641 return 0; 642 } 643 644 /** 645 * lpfc_nvme_ls_req - Issue an NVME Link Service request 646 * @pnvme_lport: Transport localport that LS is to be issued from. 647 * @pnvme_rport: Transport remoteport that LS is to be sent to. 648 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS 649 * 650 * Driver registers this routine to handle any link service request 651 * from the nvme_fc transport to a remote nvme-aware port. 652 * 653 * Return value : 654 * 0 - Success 655 * non-zero: various error codes, in form of -Exxx 656 **/ 657 static int 658 lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport, 659 struct nvme_fc_remote_port *pnvme_rport, 660 struct nvmefc_ls_req *pnvme_lsreq) 661 { 662 struct lpfc_nvme_lport *lport; 663 struct lpfc_nvme_rport *rport; 664 struct lpfc_vport *vport; 665 int ret; 666 667 lport = (struct lpfc_nvme_lport *)pnvme_lport->private; 668 rport = (struct lpfc_nvme_rport *)pnvme_rport->private; 669 if (unlikely(!lport) || unlikely(!rport)) 670 return -EINVAL; 671 672 vport = lport->vport; 673 if (vport->load_flag & FC_UNLOADING || 674 vport->phba->hba_flag & HBA_IOQ_FLUSH) 675 return -ENODEV; 676 677 atomic_inc(&lport->fc4NvmeLsRequests); 678 679 ret = __lpfc_nvme_ls_req(vport, rport->ndlp, pnvme_lsreq, 680 lpfc_nvme_ls_req_cmp); 681 if (ret) 682 atomic_inc(&lport->xmt_ls_err); 683 684 return ret; 685 } 686 687 /** 688 * __lpfc_nvme_ls_abort - Generic service routine to abort a prior 689 * NVME LS request 690 * @vport: The local port that issued the LS 691 * @ndlp: The remote port the LS was sent to 692 * @pnvme_lsreq: Pointer to LS request structure from the transport 693 * 694 * The driver validates the ndlp, looks for the LS, and aborts the 695 * LS if found. 696 * 697 * Returns: 698 * 0 : if LS found and aborted 699 * non-zero: various error conditions in form -Exxx 700 **/ 701 int 702 __lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 703 struct nvmefc_ls_req *pnvme_lsreq) 704 { 705 struct lpfc_hba *phba = vport->phba; 706 struct lpfc_sli_ring *pring; 707 struct lpfc_iocbq *wqe, *next_wqe; 708 bool foundit = false; 709 710 if (!ndlp) { 711 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 712 "6049 NVMEx LS REQ Abort: Bad NDLP x%px DID " 713 "x%06x, Failing LS Req\n", 714 ndlp, ndlp ? ndlp->nlp_DID : 0); 715 return -EINVAL; 716 } 717 718 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS, 719 "6040 NVMEx LS REQ Abort: Issue LS_ABORT for lsreq " 720 "x%px rqstlen:%d rsplen:%d %pad %pad\n", 721 pnvme_lsreq, pnvme_lsreq->rqstlen, 722 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma, 723 &pnvme_lsreq->rspdma); 724 725 /* 726 * Lock the ELS ring txcmplq and look for the wqe that matches 727 * this ELS. If found, issue an abort on the wqe. 728 */ 729 pring = phba->sli4_hba.nvmels_wq->pring; 730 spin_lock_irq(&phba->hbalock); 731 spin_lock(&pring->ring_lock); 732 list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) { 733 if (wqe->context2 == pnvme_lsreq) { 734 wqe->cmd_flag |= LPFC_DRIVER_ABORTED; 735 foundit = true; 736 break; 737 } 738 } 739 spin_unlock(&pring->ring_lock); 740 741 if (foundit) 742 lpfc_sli_issue_abort_iotag(phba, pring, wqe, NULL); 743 spin_unlock_irq(&phba->hbalock); 744 745 if (foundit) 746 return 0; 747 748 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS, 749 "6213 NVMEx LS REQ Abort: Unable to locate req x%px\n", 750 pnvme_lsreq); 751 return -EINVAL; 752 } 753 754 static int 755 lpfc_nvme_xmt_ls_rsp(struct nvme_fc_local_port *localport, 756 struct nvme_fc_remote_port *remoteport, 757 struct nvmefc_ls_rsp *ls_rsp) 758 { 759 struct lpfc_async_xchg_ctx *axchg = 760 container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp); 761 struct lpfc_nvme_lport *lport; 762 int rc; 763 764 if (axchg->phba->pport->load_flag & FC_UNLOADING) 765 return -ENODEV; 766 767 lport = (struct lpfc_nvme_lport *)localport->private; 768 769 rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, __lpfc_nvme_xmt_ls_rsp_cmp); 770 771 if (rc) { 772 /* 773 * unless the failure is due to having already sent 774 * the response, an abort will be generated for the 775 * exchange if the rsp can't be sent. 776 */ 777 if (rc != -EALREADY) 778 atomic_inc(&lport->xmt_ls_abort); 779 return rc; 780 } 781 782 return 0; 783 } 784 785 /** 786 * lpfc_nvme_ls_abort - Abort a prior NVME LS request 787 * @pnvme_lport: Transport localport that LS is to be issued from. 788 * @pnvme_rport: Transport remoteport that LS is to be sent to. 789 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS 790 * 791 * Driver registers this routine to abort a NVME LS request that is 792 * in progress (from the transports perspective). 793 **/ 794 static void 795 lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport, 796 struct nvme_fc_remote_port *pnvme_rport, 797 struct nvmefc_ls_req *pnvme_lsreq) 798 { 799 struct lpfc_nvme_lport *lport; 800 struct lpfc_vport *vport; 801 struct lpfc_nodelist *ndlp; 802 int ret; 803 804 lport = (struct lpfc_nvme_lport *)pnvme_lport->private; 805 if (unlikely(!lport)) 806 return; 807 vport = lport->vport; 808 809 if (vport->load_flag & FC_UNLOADING) 810 return; 811 812 ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id); 813 814 ret = __lpfc_nvme_ls_abort(vport, ndlp, pnvme_lsreq); 815 if (!ret) 816 atomic_inc(&lport->xmt_ls_abort); 817 } 818 819 /* Fix up the existing sgls for NVME IO. */ 820 static inline void 821 lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport, 822 struct lpfc_io_buf *lpfc_ncmd, 823 struct nvmefc_fcp_req *nCmd) 824 { 825 struct lpfc_hba *phba = vport->phba; 826 struct sli4_sge *sgl; 827 union lpfc_wqe128 *wqe; 828 uint32_t *wptr, *dptr; 829 830 /* 831 * Get a local pointer to the built-in wqe and correct 832 * the cmd size to match NVME's 96 bytes and fix 833 * the dma address. 834 */ 835 836 wqe = &lpfc_ncmd->cur_iocbq.wqe; 837 838 /* 839 * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to 840 * match NVME. NVME sends 96 bytes. Also, use the 841 * nvme commands command and response dma addresses 842 * rather than the virtual memory to ease the restore 843 * operation. 844 */ 845 sgl = lpfc_ncmd->dma_sgl; 846 sgl->sge_len = cpu_to_le32(nCmd->cmdlen); 847 if (phba->cfg_nvme_embed_cmd) { 848 sgl->addr_hi = 0; 849 sgl->addr_lo = 0; 850 851 /* Word 0-2 - NVME CMND IU (embedded payload) */ 852 wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED; 853 wqe->generic.bde.tus.f.bdeSize = 56; 854 wqe->generic.bde.addrHigh = 0; 855 wqe->generic.bde.addrLow = 64; /* Word 16 */ 856 857 /* Word 10 - dbde is 0, wqes is 1 in template */ 858 859 /* 860 * Embed the payload in the last half of the WQE 861 * WQE words 16-30 get the NVME CMD IU payload 862 * 863 * WQE words 16-19 get payload Words 1-4 864 * WQE words 20-21 get payload Words 6-7 865 * WQE words 22-29 get payload Words 16-23 866 */ 867 wptr = &wqe->words[16]; /* WQE ptr */ 868 dptr = (uint32_t *)nCmd->cmdaddr; /* payload ptr */ 869 dptr++; /* Skip Word 0 in payload */ 870 871 *wptr++ = *dptr++; /* Word 1 */ 872 *wptr++ = *dptr++; /* Word 2 */ 873 *wptr++ = *dptr++; /* Word 3 */ 874 *wptr++ = *dptr++; /* Word 4 */ 875 dptr++; /* Skip Word 5 in payload */ 876 *wptr++ = *dptr++; /* Word 6 */ 877 *wptr++ = *dptr++; /* Word 7 */ 878 dptr += 8; /* Skip Words 8-15 in payload */ 879 *wptr++ = *dptr++; /* Word 16 */ 880 *wptr++ = *dptr++; /* Word 17 */ 881 *wptr++ = *dptr++; /* Word 18 */ 882 *wptr++ = *dptr++; /* Word 19 */ 883 *wptr++ = *dptr++; /* Word 20 */ 884 *wptr++ = *dptr++; /* Word 21 */ 885 *wptr++ = *dptr++; /* Word 22 */ 886 *wptr = *dptr; /* Word 23 */ 887 } else { 888 sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->cmddma)); 889 sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->cmddma)); 890 891 /* Word 0-2 - NVME CMND IU Inline BDE */ 892 wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; 893 wqe->generic.bde.tus.f.bdeSize = nCmd->cmdlen; 894 wqe->generic.bde.addrHigh = sgl->addr_hi; 895 wqe->generic.bde.addrLow = sgl->addr_lo; 896 897 /* Word 10 */ 898 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1); 899 bf_set(wqe_wqes, &wqe->generic.wqe_com, 0); 900 } 901 902 sgl++; 903 904 /* Setup the physical region for the FCP RSP */ 905 sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma)); 906 sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma)); 907 sgl->word2 = le32_to_cpu(sgl->word2); 908 if (nCmd->sg_cnt) 909 bf_set(lpfc_sli4_sge_last, sgl, 0); 910 else 911 bf_set(lpfc_sli4_sge_last, sgl, 1); 912 sgl->word2 = cpu_to_le32(sgl->word2); 913 sgl->sge_len = cpu_to_le32(nCmd->rsplen); 914 } 915 916 917 /* 918 * lpfc_nvme_io_cmd_cmpl - Complete an NVME-over-FCP IO 919 * 920 * Driver registers this routine as it io request handler. This 921 * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq 922 * data structure to the rport indicated in @lpfc_nvme_rport. 923 * 924 * Return value : 925 * 0 - Success 926 * TODO: What are the failure codes. 927 **/ 928 static void 929 lpfc_nvme_io_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, 930 struct lpfc_iocbq *pwqeOut) 931 { 932 struct lpfc_io_buf *lpfc_ncmd = 933 (struct lpfc_io_buf *)pwqeIn->context1; 934 struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl; 935 struct lpfc_vport *vport = pwqeIn->vport; 936 struct nvmefc_fcp_req *nCmd; 937 struct nvme_fc_ersp_iu *ep; 938 struct nvme_fc_cmd_iu *cp; 939 struct lpfc_nodelist *ndlp; 940 struct lpfc_nvme_fcpreq_priv *freqpriv; 941 struct lpfc_nvme_lport *lport; 942 uint32_t code, status, idx; 943 uint16_t cid, sqhd, data; 944 uint32_t *ptr; 945 uint32_t lat; 946 bool call_done = false; 947 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 948 int cpu; 949 #endif 950 int offline = 0; 951 952 /* Sanity check on return of outstanding command */ 953 if (!lpfc_ncmd) { 954 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 955 "6071 Null lpfc_ncmd pointer. No " 956 "release, skip completion\n"); 957 return; 958 } 959 960 /* Guard against abort handler being called at same time */ 961 spin_lock(&lpfc_ncmd->buf_lock); 962 963 if (!lpfc_ncmd->nvmeCmd) { 964 spin_unlock(&lpfc_ncmd->buf_lock); 965 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 966 "6066 Missing cmpl ptrs: lpfc_ncmd x%px, " 967 "nvmeCmd x%px\n", 968 lpfc_ncmd, lpfc_ncmd->nvmeCmd); 969 970 /* Release the lpfc_ncmd regardless of the missing elements. */ 971 lpfc_release_nvme_buf(phba, lpfc_ncmd); 972 return; 973 } 974 nCmd = lpfc_ncmd->nvmeCmd; 975 status = bf_get(lpfc_wcqe_c_status, wcqe); 976 977 idx = lpfc_ncmd->cur_iocbq.hba_wqidx; 978 phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++; 979 980 if (unlikely(status && vport->localport)) { 981 lport = (struct lpfc_nvme_lport *)vport->localport->private; 982 if (lport) { 983 if (bf_get(lpfc_wcqe_c_xb, wcqe)) 984 atomic_inc(&lport->cmpl_fcp_xb); 985 atomic_inc(&lport->cmpl_fcp_err); 986 } 987 } 988 989 lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n", 990 lpfc_ncmd->cur_iocbq.sli4_xritag, 991 status, wcqe->parameter); 992 /* 993 * Catch race where our node has transitioned, but the 994 * transport is still transitioning. 995 */ 996 ndlp = lpfc_ncmd->ndlp; 997 if (!ndlp) { 998 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 999 "6062 Ignoring NVME cmpl. No ndlp\n"); 1000 goto out_err; 1001 } 1002 1003 code = bf_get(lpfc_wcqe_c_code, wcqe); 1004 if (code == CQE_CODE_NVME_ERSP) { 1005 /* For this type of CQE, we need to rebuild the rsp */ 1006 ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr; 1007 1008 /* 1009 * Get Command Id from cmd to plug into response. This 1010 * code is not needed in the next NVME Transport drop. 1011 */ 1012 cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr; 1013 cid = cp->sqe.common.command_id; 1014 1015 /* 1016 * RSN is in CQE word 2 1017 * SQHD is in CQE Word 3 bits 15:0 1018 * Cmd Specific info is in CQE Word 1 1019 * and in CQE Word 0 bits 15:0 1020 */ 1021 sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe); 1022 1023 /* Now lets build the NVME ERSP IU */ 1024 ep->iu_len = cpu_to_be16(8); 1025 ep->rsn = wcqe->parameter; 1026 ep->xfrd_len = cpu_to_be32(nCmd->payload_length); 1027 ep->rsvd12 = 0; 1028 ptr = (uint32_t *)&ep->cqe.result.u64; 1029 *ptr++ = wcqe->total_data_placed; 1030 data = bf_get(lpfc_wcqe_c_ersp0, wcqe); 1031 *ptr = (uint32_t)data; 1032 ep->cqe.sq_head = sqhd; 1033 ep->cqe.sq_id = nCmd->sqid; 1034 ep->cqe.command_id = cid; 1035 ep->cqe.status = 0; 1036 1037 lpfc_ncmd->status = IOSTAT_SUCCESS; 1038 lpfc_ncmd->result = 0; 1039 nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN; 1040 nCmd->transferred_length = nCmd->payload_length; 1041 } else { 1042 lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK); 1043 lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK); 1044 1045 /* For NVME, the only failure path that results in an 1046 * IO error is when the adapter rejects it. All other 1047 * conditions are a success case and resolved by the 1048 * transport. 1049 * IOSTAT_FCP_RSP_ERROR means: 1050 * 1. Length of data received doesn't match total 1051 * transfer length in WQE 1052 * 2. If the RSP payload does NOT match these cases: 1053 * a. RSP length 12/24 bytes and all zeros 1054 * b. NVME ERSP 1055 */ 1056 switch (lpfc_ncmd->status) { 1057 case IOSTAT_SUCCESS: 1058 nCmd->transferred_length = wcqe->total_data_placed; 1059 nCmd->rcv_rsplen = 0; 1060 nCmd->status = 0; 1061 break; 1062 case IOSTAT_FCP_RSP_ERROR: 1063 nCmd->transferred_length = wcqe->total_data_placed; 1064 nCmd->rcv_rsplen = wcqe->parameter; 1065 nCmd->status = 0; 1066 1067 /* Check if this is really an ERSP */ 1068 if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) { 1069 lpfc_ncmd->status = IOSTAT_SUCCESS; 1070 lpfc_ncmd->result = 0; 1071 1072 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, 1073 "6084 NVME Completion ERSP: " 1074 "xri %x placed x%x\n", 1075 lpfc_ncmd->cur_iocbq.sli4_xritag, 1076 wcqe->total_data_placed); 1077 break; 1078 } 1079 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1080 "6081 NVME Completion Protocol Error: " 1081 "xri %x status x%x result x%x " 1082 "placed x%x\n", 1083 lpfc_ncmd->cur_iocbq.sli4_xritag, 1084 lpfc_ncmd->status, lpfc_ncmd->result, 1085 wcqe->total_data_placed); 1086 break; 1087 case IOSTAT_LOCAL_REJECT: 1088 /* Let fall through to set command final state. */ 1089 if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED) 1090 lpfc_printf_vlog(vport, KERN_INFO, 1091 LOG_NVME_IOERR, 1092 "6032 Delay Aborted cmd x%px " 1093 "nvme cmd x%px, xri x%x, " 1094 "xb %d\n", 1095 lpfc_ncmd, nCmd, 1096 lpfc_ncmd->cur_iocbq.sli4_xritag, 1097 bf_get(lpfc_wcqe_c_xb, wcqe)); 1098 fallthrough; 1099 default: 1100 out_err: 1101 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 1102 "6072 NVME Completion Error: xri %x " 1103 "status x%x result x%x [x%x] " 1104 "placed x%x\n", 1105 lpfc_ncmd->cur_iocbq.sli4_xritag, 1106 lpfc_ncmd->status, lpfc_ncmd->result, 1107 wcqe->parameter, 1108 wcqe->total_data_placed); 1109 nCmd->transferred_length = 0; 1110 nCmd->rcv_rsplen = 0; 1111 nCmd->status = NVME_SC_INTERNAL; 1112 offline = pci_channel_offline(vport->phba->pcidev); 1113 } 1114 } 1115 1116 /* pick up SLI4 exhange busy condition */ 1117 if (bf_get(lpfc_wcqe_c_xb, wcqe) && !offline) 1118 lpfc_ncmd->flags |= LPFC_SBUF_XBUSY; 1119 else 1120 lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY; 1121 1122 /* Update stats and complete the IO. There is 1123 * no need for dma unprep because the nvme_transport 1124 * owns the dma address. 1125 */ 1126 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1127 if (lpfc_ncmd->ts_cmd_start) { 1128 lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp; 1129 lpfc_ncmd->ts_data_io = ktime_get_ns(); 1130 phba->ktime_last_cmd = lpfc_ncmd->ts_data_io; 1131 lpfc_io_ktime(phba, lpfc_ncmd); 1132 } 1133 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_NVME_IO)) { 1134 cpu = raw_smp_processor_id(); 1135 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); 1136 if (lpfc_ncmd->cpu != cpu) 1137 lpfc_printf_vlog(vport, 1138 KERN_INFO, LOG_NVME_IOERR, 1139 "6701 CPU Check cmpl: " 1140 "cpu %d expect %d\n", 1141 cpu, lpfc_ncmd->cpu); 1142 } 1143 #endif 1144 1145 /* NVME targets need completion held off until the abort exchange 1146 * completes unless the NVME Rport is getting unregistered. 1147 */ 1148 1149 if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) { 1150 freqpriv = nCmd->private; 1151 freqpriv->nvme_buf = NULL; 1152 lpfc_ncmd->nvmeCmd = NULL; 1153 call_done = true; 1154 } 1155 spin_unlock(&lpfc_ncmd->buf_lock); 1156 1157 /* Check if IO qualified for CMF */ 1158 if (phba->cmf_active_mode != LPFC_CFG_OFF && 1159 nCmd->io_dir == NVMEFC_FCP_READ && 1160 nCmd->payload_length) { 1161 /* Used when calculating average latency */ 1162 lat = ktime_get_ns() - lpfc_ncmd->rx_cmd_start; 1163 lpfc_update_cmf_cmpl(phba, lat, nCmd->payload_length, NULL); 1164 } 1165 1166 if (call_done) 1167 nCmd->done(nCmd); 1168 1169 /* Call release with XB=1 to queue the IO into the abort list. */ 1170 lpfc_release_nvme_buf(phba, lpfc_ncmd); 1171 } 1172 1173 1174 /** 1175 * lpfc_nvme_prep_io_cmd - Issue an NVME-over-FCP IO 1176 * @vport: pointer to a host virtual N_Port data structure 1177 * @lpfc_ncmd: Pointer to lpfc scsi command 1178 * @pnode: pointer to a node-list data structure 1179 * @cstat: pointer to the control status structure 1180 * 1181 * Driver registers this routine as it io request handler. This 1182 * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq 1183 * data structure to the rport indicated in @lpfc_nvme_rport. 1184 * 1185 * Return value : 1186 * 0 - Success 1187 * TODO: What are the failure codes. 1188 **/ 1189 static int 1190 lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, 1191 struct lpfc_io_buf *lpfc_ncmd, 1192 struct lpfc_nodelist *pnode, 1193 struct lpfc_fc4_ctrl_stat *cstat) 1194 { 1195 struct lpfc_hba *phba = vport->phba; 1196 struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd; 1197 struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq); 1198 union lpfc_wqe128 *wqe = &pwqeq->wqe; 1199 uint32_t req_len; 1200 1201 /* 1202 * There are three possibilities here - use scatter-gather segment, use 1203 * the single mapping, or neither. 1204 */ 1205 if (nCmd->sg_cnt) { 1206 if (nCmd->io_dir == NVMEFC_FCP_WRITE) { 1207 /* From the iwrite template, initialize words 7 - 11 */ 1208 memcpy(&wqe->words[7], 1209 &lpfc_iwrite_cmd_template.words[7], 1210 sizeof(uint32_t) * 5); 1211 1212 /* Word 4 */ 1213 wqe->fcp_iwrite.total_xfer_len = nCmd->payload_length; 1214 1215 /* Word 5 */ 1216 if ((phba->cfg_nvme_enable_fb) && 1217 (pnode->nlp_flag & NLP_FIRSTBURST)) { 1218 req_len = lpfc_ncmd->nvmeCmd->payload_length; 1219 if (req_len < pnode->nvme_fb_size) 1220 wqe->fcp_iwrite.initial_xfer_len = 1221 req_len; 1222 else 1223 wqe->fcp_iwrite.initial_xfer_len = 1224 pnode->nvme_fb_size; 1225 } else { 1226 wqe->fcp_iwrite.initial_xfer_len = 0; 1227 } 1228 cstat->output_requests++; 1229 } else { 1230 /* From the iread template, initialize words 7 - 11 */ 1231 memcpy(&wqe->words[7], 1232 &lpfc_iread_cmd_template.words[7], 1233 sizeof(uint32_t) * 5); 1234 1235 /* Word 4 */ 1236 wqe->fcp_iread.total_xfer_len = nCmd->payload_length; 1237 1238 /* Word 5 */ 1239 wqe->fcp_iread.rsrvd5 = 0; 1240 1241 /* For a CMF Managed port, iod must be zero'ed */ 1242 if (phba->cmf_active_mode == LPFC_CFG_MANAGED) 1243 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, 1244 LPFC_WQE_IOD_NONE); 1245 cstat->input_requests++; 1246 } 1247 } else { 1248 /* From the icmnd template, initialize words 4 - 11 */ 1249 memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4], 1250 sizeof(uint32_t) * 8); 1251 cstat->control_requests++; 1252 } 1253 1254 if (pnode->nlp_nvme_info & NLP_NVME_NSLER) 1255 bf_set(wqe_erp, &wqe->generic.wqe_com, 1); 1256 /* 1257 * Finish initializing those WQE fields that are independent 1258 * of the nvme_cmnd request_buffer 1259 */ 1260 1261 /* Word 3 */ 1262 bf_set(payload_offset_len, &wqe->fcp_icmd, 1263 (nCmd->rsplen + nCmd->cmdlen)); 1264 1265 /* Word 6 */ 1266 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 1267 phba->sli4_hba.rpi_ids[pnode->nlp_rpi]); 1268 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag); 1269 1270 /* Word 8 */ 1271 wqe->generic.wqe_com.abort_tag = pwqeq->iotag; 1272 1273 /* Word 9 */ 1274 bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag); 1275 1276 /* Word 10 */ 1277 bf_set(wqe_xchg, &wqe->fcp_iwrite.wqe_com, LPFC_NVME_XCHG); 1278 1279 /* Words 13 14 15 are for PBDE support */ 1280 1281 pwqeq->vport = vport; 1282 return 0; 1283 } 1284 1285 1286 /** 1287 * lpfc_nvme_prep_io_dma - Issue an NVME-over-FCP IO 1288 * @vport: pointer to a host virtual N_Port data structure 1289 * @lpfc_ncmd: Pointer to lpfc scsi command 1290 * 1291 * Driver registers this routine as it io request handler. This 1292 * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq 1293 * data structure to the rport indicated in @lpfc_nvme_rport. 1294 * 1295 * Return value : 1296 * 0 - Success 1297 * TODO: What are the failure codes. 1298 **/ 1299 static int 1300 lpfc_nvme_prep_io_dma(struct lpfc_vport *vport, 1301 struct lpfc_io_buf *lpfc_ncmd) 1302 { 1303 struct lpfc_hba *phba = vport->phba; 1304 struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd; 1305 union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe; 1306 struct sli4_sge *sgl = lpfc_ncmd->dma_sgl; 1307 struct sli4_hybrid_sgl *sgl_xtra = NULL; 1308 struct scatterlist *data_sg; 1309 struct sli4_sge *first_data_sgl; 1310 struct ulp_bde64 *bde; 1311 dma_addr_t physaddr = 0; 1312 uint32_t dma_len = 0; 1313 uint32_t dma_offset = 0; 1314 int nseg, i, j; 1315 bool lsp_just_set = false; 1316 1317 /* Fix up the command and response DMA stuff. */ 1318 lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd); 1319 1320 /* 1321 * There are three possibilities here - use scatter-gather segment, use 1322 * the single mapping, or neither. 1323 */ 1324 if (nCmd->sg_cnt) { 1325 /* 1326 * Jump over the cmd and rsp SGEs. The fix routine 1327 * has already adjusted for this. 1328 */ 1329 sgl += 2; 1330 1331 first_data_sgl = sgl; 1332 lpfc_ncmd->seg_cnt = nCmd->sg_cnt; 1333 if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) { 1334 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1335 "6058 Too many sg segments from " 1336 "NVME Transport. Max %d, " 1337 "nvmeIO sg_cnt %d\n", 1338 phba->cfg_nvme_seg_cnt + 1, 1339 lpfc_ncmd->seg_cnt); 1340 lpfc_ncmd->seg_cnt = 0; 1341 return 1; 1342 } 1343 1344 /* 1345 * The driver established a maximum scatter-gather segment count 1346 * during probe that limits the number of sg elements in any 1347 * single nvme command. Just run through the seg_cnt and format 1348 * the sge's. 1349 */ 1350 nseg = nCmd->sg_cnt; 1351 data_sg = nCmd->first_sgl; 1352 1353 /* for tracking the segment boundaries */ 1354 j = 2; 1355 for (i = 0; i < nseg; i++) { 1356 if (data_sg == NULL) { 1357 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1358 "6059 dptr err %d, nseg %d\n", 1359 i, nseg); 1360 lpfc_ncmd->seg_cnt = 0; 1361 return 1; 1362 } 1363 1364 sgl->word2 = 0; 1365 if (nseg == 1) { 1366 bf_set(lpfc_sli4_sge_last, sgl, 1); 1367 bf_set(lpfc_sli4_sge_type, sgl, 1368 LPFC_SGE_TYPE_DATA); 1369 } else { 1370 bf_set(lpfc_sli4_sge_last, sgl, 0); 1371 1372 /* expand the segment */ 1373 if (!lsp_just_set && 1374 !((j + 1) % phba->border_sge_num) && 1375 ((nseg - 1) != i)) { 1376 /* set LSP type */ 1377 bf_set(lpfc_sli4_sge_type, sgl, 1378 LPFC_SGE_TYPE_LSP); 1379 1380 sgl_xtra = lpfc_get_sgl_per_hdwq( 1381 phba, lpfc_ncmd); 1382 1383 if (unlikely(!sgl_xtra)) { 1384 lpfc_ncmd->seg_cnt = 0; 1385 return 1; 1386 } 1387 sgl->addr_lo = cpu_to_le32(putPaddrLow( 1388 sgl_xtra->dma_phys_sgl)); 1389 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 1390 sgl_xtra->dma_phys_sgl)); 1391 1392 } else { 1393 bf_set(lpfc_sli4_sge_type, sgl, 1394 LPFC_SGE_TYPE_DATA); 1395 } 1396 } 1397 1398 if (!(bf_get(lpfc_sli4_sge_type, sgl) & 1399 LPFC_SGE_TYPE_LSP)) { 1400 if ((nseg - 1) == i) 1401 bf_set(lpfc_sli4_sge_last, sgl, 1); 1402 1403 physaddr = data_sg->dma_address; 1404 dma_len = data_sg->length; 1405 sgl->addr_lo = cpu_to_le32( 1406 putPaddrLow(physaddr)); 1407 sgl->addr_hi = cpu_to_le32( 1408 putPaddrHigh(physaddr)); 1409 1410 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 1411 sgl->word2 = cpu_to_le32(sgl->word2); 1412 sgl->sge_len = cpu_to_le32(dma_len); 1413 1414 dma_offset += dma_len; 1415 data_sg = sg_next(data_sg); 1416 1417 sgl++; 1418 1419 lsp_just_set = false; 1420 } else { 1421 sgl->word2 = cpu_to_le32(sgl->word2); 1422 1423 sgl->sge_len = cpu_to_le32( 1424 phba->cfg_sg_dma_buf_size); 1425 1426 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 1427 i = i - 1; 1428 1429 lsp_just_set = true; 1430 } 1431 1432 j++; 1433 } 1434 1435 /* PBDE support for first data SGE only */ 1436 if (nseg == 1 && phba->cfg_enable_pbde) { 1437 /* Words 13-15 */ 1438 bde = (struct ulp_bde64 *) 1439 &wqe->words[13]; 1440 bde->addrLow = first_data_sgl->addr_lo; 1441 bde->addrHigh = first_data_sgl->addr_hi; 1442 bde->tus.f.bdeSize = 1443 le32_to_cpu(first_data_sgl->sge_len); 1444 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1445 bde->tus.w = cpu_to_le32(bde->tus.w); 1446 1447 /* Word 11 - set PBDE bit */ 1448 bf_set(wqe_pbde, &wqe->generic.wqe_com, 1); 1449 } else { 1450 memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3)); 1451 /* Word 11 - PBDE bit disabled by default template */ 1452 } 1453 1454 } else { 1455 lpfc_ncmd->seg_cnt = 0; 1456 1457 /* For this clause to be valid, the payload_length 1458 * and sg_cnt must zero. 1459 */ 1460 if (nCmd->payload_length != 0) { 1461 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1462 "6063 NVME DMA Prep Err: sg_cnt %d " 1463 "payload_length x%x\n", 1464 nCmd->sg_cnt, nCmd->payload_length); 1465 return 1; 1466 } 1467 } 1468 return 0; 1469 } 1470 1471 /** 1472 * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO 1473 * @pnvme_lport: Pointer to the driver's local port data 1474 * @pnvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq 1475 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue 1476 * @pnvme_fcreq: IO request from nvme fc to driver. 1477 * 1478 * Driver registers this routine as it io request handler. This 1479 * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq 1480 * data structure to the rport indicated in @lpfc_nvme_rport. 1481 * 1482 * Return value : 1483 * 0 - Success 1484 * TODO: What are the failure codes. 1485 **/ 1486 static int 1487 lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, 1488 struct nvme_fc_remote_port *pnvme_rport, 1489 void *hw_queue_handle, 1490 struct nvmefc_fcp_req *pnvme_fcreq) 1491 { 1492 int ret = 0; 1493 int expedite = 0; 1494 int idx, cpu; 1495 struct lpfc_nvme_lport *lport; 1496 struct lpfc_fc4_ctrl_stat *cstat; 1497 struct lpfc_vport *vport; 1498 struct lpfc_hba *phba; 1499 struct lpfc_nodelist *ndlp; 1500 struct lpfc_io_buf *lpfc_ncmd; 1501 struct lpfc_nvme_rport *rport; 1502 struct lpfc_nvme_qhandle *lpfc_queue_info; 1503 struct lpfc_nvme_fcpreq_priv *freqpriv; 1504 struct nvme_common_command *sqe; 1505 uint64_t start = 0; 1506 1507 /* Validate pointers. LLDD fault handling with transport does 1508 * have timing races. 1509 */ 1510 lport = (struct lpfc_nvme_lport *)pnvme_lport->private; 1511 if (unlikely(!lport)) { 1512 ret = -EINVAL; 1513 goto out_fail; 1514 } 1515 1516 vport = lport->vport; 1517 1518 if (unlikely(!hw_queue_handle)) { 1519 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 1520 "6117 Fail IO, NULL hw_queue_handle\n"); 1521 atomic_inc(&lport->xmt_fcp_err); 1522 ret = -EBUSY; 1523 goto out_fail; 1524 } 1525 1526 phba = vport->phba; 1527 1528 if ((unlikely(vport->load_flag & FC_UNLOADING)) || 1529 phba->hba_flag & HBA_IOQ_FLUSH) { 1530 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 1531 "6124 Fail IO, Driver unload\n"); 1532 atomic_inc(&lport->xmt_fcp_err); 1533 ret = -ENODEV; 1534 goto out_fail; 1535 } 1536 1537 freqpriv = pnvme_fcreq->private; 1538 if (unlikely(!freqpriv)) { 1539 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 1540 "6158 Fail IO, NULL request data\n"); 1541 atomic_inc(&lport->xmt_fcp_err); 1542 ret = -EINVAL; 1543 goto out_fail; 1544 } 1545 1546 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1547 if (phba->ktime_on) 1548 start = ktime_get_ns(); 1549 #endif 1550 rport = (struct lpfc_nvme_rport *)pnvme_rport->private; 1551 lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle; 1552 1553 /* 1554 * Catch race where our node has transitioned, but the 1555 * transport is still transitioning. 1556 */ 1557 ndlp = rport->ndlp; 1558 if (!ndlp) { 1559 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR, 1560 "6053 Busy IO, ndlp not ready: rport x%px " 1561 "ndlp x%px, DID x%06x\n", 1562 rport, ndlp, pnvme_rport->port_id); 1563 atomic_inc(&lport->xmt_fcp_err); 1564 ret = -EBUSY; 1565 goto out_fail; 1566 } 1567 1568 /* The remote node has to be a mapped target or it's an error. */ 1569 if ((ndlp->nlp_type & NLP_NVME_TARGET) && 1570 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 1571 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR, 1572 "6036 Fail IO, DID x%06x not ready for " 1573 "IO. State x%x, Type x%x Flg x%x\n", 1574 pnvme_rport->port_id, 1575 ndlp->nlp_state, ndlp->nlp_type, 1576 ndlp->fc4_xpt_flags); 1577 atomic_inc(&lport->xmt_fcp_bad_ndlp); 1578 ret = -EBUSY; 1579 goto out_fail; 1580 1581 } 1582 1583 /* Currently only NVME Keep alive commands should be expedited 1584 * if the driver runs out of a resource. These should only be 1585 * issued on the admin queue, qidx 0 1586 */ 1587 if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) { 1588 sqe = &((struct nvme_fc_cmd_iu *) 1589 pnvme_fcreq->cmdaddr)->sqe.common; 1590 if (sqe->opcode == nvme_admin_keep_alive) 1591 expedite = 1; 1592 } 1593 1594 /* Check if IO qualifies for CMF */ 1595 if (phba->cmf_active_mode != LPFC_CFG_OFF && 1596 pnvme_fcreq->io_dir == NVMEFC_FCP_READ && 1597 pnvme_fcreq->payload_length) { 1598 ret = lpfc_update_cmf_cmd(phba, pnvme_fcreq->payload_length); 1599 if (ret) { 1600 ret = -EBUSY; 1601 goto out_fail; 1602 } 1603 /* Get start time for IO latency */ 1604 start = ktime_get_ns(); 1605 } 1606 1607 /* The node is shared with FCP IO, make sure the IO pending count does 1608 * not exceed the programmed depth. 1609 */ 1610 if (lpfc_ndlp_check_qdepth(phba, ndlp)) { 1611 if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) && 1612 !expedite) { 1613 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 1614 "6174 Fail IO, ndlp qdepth exceeded: " 1615 "idx %d DID %x pend %d qdepth %d\n", 1616 lpfc_queue_info->index, ndlp->nlp_DID, 1617 atomic_read(&ndlp->cmd_pending), 1618 ndlp->cmd_qdepth); 1619 atomic_inc(&lport->xmt_fcp_qdepth); 1620 ret = -EBUSY; 1621 goto out_fail1; 1622 } 1623 } 1624 1625 /* Lookup Hardware Queue index based on fcp_io_sched module parameter */ 1626 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) { 1627 idx = lpfc_queue_info->index; 1628 } else { 1629 cpu = raw_smp_processor_id(); 1630 idx = phba->sli4_hba.cpu_map[cpu].hdwq; 1631 } 1632 1633 lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite); 1634 if (lpfc_ncmd == NULL) { 1635 atomic_inc(&lport->xmt_fcp_noxri); 1636 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 1637 "6065 Fail IO, driver buffer pool is empty: " 1638 "idx %d DID %x\n", 1639 lpfc_queue_info->index, ndlp->nlp_DID); 1640 ret = -EBUSY; 1641 goto out_fail1; 1642 } 1643 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1644 if (start) { 1645 lpfc_ncmd->ts_cmd_start = start; 1646 lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd; 1647 } else { 1648 lpfc_ncmd->ts_cmd_start = 0; 1649 } 1650 #endif 1651 lpfc_ncmd->rx_cmd_start = start; 1652 1653 /* 1654 * Store the data needed by the driver to issue, abort, and complete 1655 * an IO. 1656 * Do not let the IO hang out forever. There is no midlayer issuing 1657 * an abort so inform the FW of the maximum IO pending time. 1658 */ 1659 freqpriv->nvme_buf = lpfc_ncmd; 1660 lpfc_ncmd->nvmeCmd = pnvme_fcreq; 1661 lpfc_ncmd->ndlp = ndlp; 1662 lpfc_ncmd->qidx = lpfc_queue_info->qidx; 1663 1664 /* 1665 * Issue the IO on the WQ indicated by index in the hw_queue_handle. 1666 * This identfier was create in our hardware queue create callback 1667 * routine. The driver now is dependent on the IO queue steering from 1668 * the transport. We are trusting the upper NVME layers know which 1669 * index to use and that they have affinitized a CPU to this hardware 1670 * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ. 1671 */ 1672 lpfc_ncmd->cur_iocbq.hba_wqidx = idx; 1673 cstat = &phba->sli4_hba.hdwq[idx].nvme_cstat; 1674 1675 lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat); 1676 ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd); 1677 if (ret) { 1678 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 1679 "6175 Fail IO, Prep DMA: " 1680 "idx %d DID %x\n", 1681 lpfc_queue_info->index, ndlp->nlp_DID); 1682 atomic_inc(&lport->xmt_fcp_err); 1683 ret = -ENOMEM; 1684 goto out_free_nvme_buf; 1685 } 1686 1687 lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n", 1688 lpfc_ncmd->cur_iocbq.sli4_xritag, 1689 lpfc_queue_info->index, ndlp->nlp_DID); 1690 1691 ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq); 1692 if (ret) { 1693 atomic_inc(&lport->xmt_fcp_wqerr); 1694 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 1695 "6113 Fail IO, Could not issue WQE err %x " 1696 "sid: x%x did: x%x oxid: x%x\n", 1697 ret, vport->fc_myDID, ndlp->nlp_DID, 1698 lpfc_ncmd->cur_iocbq.sli4_xritag); 1699 goto out_free_nvme_buf; 1700 } 1701 1702 if (phba->cfg_xri_rebalancing) 1703 lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_ncmd->hdwq_no); 1704 1705 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1706 if (lpfc_ncmd->ts_cmd_start) 1707 lpfc_ncmd->ts_cmd_wqput = ktime_get_ns(); 1708 1709 if (phba->hdwqstat_on & LPFC_CHECK_NVME_IO) { 1710 cpu = raw_smp_processor_id(); 1711 this_cpu_inc(phba->sli4_hba.c_stat->xmt_io); 1712 lpfc_ncmd->cpu = cpu; 1713 if (idx != cpu) 1714 lpfc_printf_vlog(vport, 1715 KERN_INFO, LOG_NVME_IOERR, 1716 "6702 CPU Check cmd: " 1717 "cpu %d wq %d\n", 1718 lpfc_ncmd->cpu, 1719 lpfc_queue_info->index); 1720 } 1721 #endif 1722 return 0; 1723 1724 out_free_nvme_buf: 1725 if (lpfc_ncmd->nvmeCmd->sg_cnt) { 1726 if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE) 1727 cstat->output_requests--; 1728 else 1729 cstat->input_requests--; 1730 } else 1731 cstat->control_requests--; 1732 lpfc_release_nvme_buf(phba, lpfc_ncmd); 1733 out_fail1: 1734 lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, 1735 pnvme_fcreq->payload_length, NULL); 1736 out_fail: 1737 return ret; 1738 } 1739 1740 /** 1741 * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request. 1742 * @phba: Pointer to HBA context object 1743 * @cmdiocb: Pointer to command iocb object. 1744 * @abts_cmpl: Pointer to wcqe complete object. 1745 * 1746 * This is the callback function for any NVME FCP IO that was aborted. 1747 * 1748 * Return value: 1749 * None 1750 **/ 1751 void 1752 lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1753 struct lpfc_wcqe_complete *abts_cmpl) 1754 { 1755 lpfc_printf_log(phba, KERN_INFO, LOG_NVME, 1756 "6145 ABORT_XRI_CN completing on rpi x%x " 1757 "original iotag x%x, abort cmd iotag x%x " 1758 "req_tag x%x, status x%x, hwstatus x%x\n", 1759 bf_get(wqe_ctxt_tag, &cmdiocb->wqe.generic.wqe_com), 1760 get_job_abtsiotag(phba, cmdiocb), cmdiocb->iotag, 1761 bf_get(lpfc_wcqe_c_request_tag, abts_cmpl), 1762 bf_get(lpfc_wcqe_c_status, abts_cmpl), 1763 bf_get(lpfc_wcqe_c_hw_status, abts_cmpl)); 1764 lpfc_sli_release_iocbq(phba, cmdiocb); 1765 } 1766 1767 /** 1768 * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS 1769 * @pnvme_lport: Pointer to the driver's local port data 1770 * @pnvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq 1771 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue 1772 * @pnvme_fcreq: IO request from nvme fc to driver. 1773 * 1774 * Driver registers this routine as its nvme request io abort handler. This 1775 * routine issues an fcp Abort WQE with data from the @lpfc_nvme_fcpreq 1776 * data structure to the rport indicated in @lpfc_nvme_rport. This routine 1777 * is executed asynchronously - one the target is validated as "MAPPED" and 1778 * ready for IO, the driver issues the abort request and returns. 1779 * 1780 * Return value: 1781 * None 1782 **/ 1783 static void 1784 lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, 1785 struct nvme_fc_remote_port *pnvme_rport, 1786 void *hw_queue_handle, 1787 struct nvmefc_fcp_req *pnvme_fcreq) 1788 { 1789 struct lpfc_nvme_lport *lport; 1790 struct lpfc_vport *vport; 1791 struct lpfc_hba *phba; 1792 struct lpfc_io_buf *lpfc_nbuf; 1793 struct lpfc_iocbq *nvmereq_wqe; 1794 struct lpfc_nvme_fcpreq_priv *freqpriv; 1795 unsigned long flags; 1796 int ret_val; 1797 1798 /* Validate pointers. LLDD fault handling with transport does 1799 * have timing races. 1800 */ 1801 lport = (struct lpfc_nvme_lport *)pnvme_lport->private; 1802 if (unlikely(!lport)) 1803 return; 1804 1805 vport = lport->vport; 1806 1807 if (unlikely(!hw_queue_handle)) { 1808 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, 1809 "6129 Fail Abort, HW Queue Handle NULL.\n"); 1810 return; 1811 } 1812 1813 phba = vport->phba; 1814 freqpriv = pnvme_fcreq->private; 1815 1816 if (unlikely(!freqpriv)) 1817 return; 1818 if (vport->load_flag & FC_UNLOADING) 1819 return; 1820 1821 /* Announce entry to new IO submit field. */ 1822 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, 1823 "6002 Abort Request to rport DID x%06x " 1824 "for nvme_fc_req x%px\n", 1825 pnvme_rport->port_id, 1826 pnvme_fcreq); 1827 1828 /* If the hba is getting reset, this flag is set. It is 1829 * cleared when the reset is complete and rings reestablished. 1830 */ 1831 spin_lock_irqsave(&phba->hbalock, flags); 1832 /* driver queued commands are in process of being flushed */ 1833 if (phba->hba_flag & HBA_IOQ_FLUSH) { 1834 spin_unlock_irqrestore(&phba->hbalock, flags); 1835 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1836 "6139 Driver in reset cleanup - flushing " 1837 "NVME Req now. hba_flag x%x\n", 1838 phba->hba_flag); 1839 return; 1840 } 1841 1842 lpfc_nbuf = freqpriv->nvme_buf; 1843 if (!lpfc_nbuf) { 1844 spin_unlock_irqrestore(&phba->hbalock, flags); 1845 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1846 "6140 NVME IO req has no matching lpfc nvme " 1847 "io buffer. Skipping abort req.\n"); 1848 return; 1849 } else if (!lpfc_nbuf->nvmeCmd) { 1850 spin_unlock_irqrestore(&phba->hbalock, flags); 1851 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1852 "6141 lpfc NVME IO req has no nvme_fcreq " 1853 "io buffer. Skipping abort req.\n"); 1854 return; 1855 } 1856 nvmereq_wqe = &lpfc_nbuf->cur_iocbq; 1857 1858 /* Guard against IO completion being called at same time */ 1859 spin_lock(&lpfc_nbuf->buf_lock); 1860 1861 /* 1862 * The lpfc_nbuf and the mapped nvme_fcreq in the driver's 1863 * state must match the nvme_fcreq passed by the nvme 1864 * transport. If they don't match, it is likely the driver 1865 * has already completed the NVME IO and the nvme transport 1866 * has not seen it yet. 1867 */ 1868 if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) { 1869 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1870 "6143 NVME req mismatch: " 1871 "lpfc_nbuf x%px nvmeCmd x%px, " 1872 "pnvme_fcreq x%px. Skipping Abort xri x%x\n", 1873 lpfc_nbuf, lpfc_nbuf->nvmeCmd, 1874 pnvme_fcreq, nvmereq_wqe->sli4_xritag); 1875 goto out_unlock; 1876 } 1877 1878 /* Don't abort IOs no longer on the pending queue. */ 1879 if (!(nvmereq_wqe->cmd_flag & LPFC_IO_ON_TXCMPLQ)) { 1880 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1881 "6142 NVME IO req x%px not queued - skipping " 1882 "abort req xri x%x\n", 1883 pnvme_fcreq, nvmereq_wqe->sli4_xritag); 1884 goto out_unlock; 1885 } 1886 1887 atomic_inc(&lport->xmt_fcp_abort); 1888 lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n", 1889 nvmereq_wqe->sli4_xritag, 1890 nvmereq_wqe->hba_wqidx, pnvme_rport->port_id); 1891 1892 /* Outstanding abort is in progress */ 1893 if (nvmereq_wqe->cmd_flag & LPFC_DRIVER_ABORTED) { 1894 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1895 "6144 Outstanding NVME I/O Abort Request " 1896 "still pending on nvme_fcreq x%px, " 1897 "lpfc_ncmd x%px xri x%x\n", 1898 pnvme_fcreq, lpfc_nbuf, 1899 nvmereq_wqe->sli4_xritag); 1900 goto out_unlock; 1901 } 1902 1903 ret_val = lpfc_sli4_issue_abort_iotag(phba, nvmereq_wqe, 1904 lpfc_nvme_abort_fcreq_cmpl); 1905 1906 spin_unlock(&lpfc_nbuf->buf_lock); 1907 spin_unlock_irqrestore(&phba->hbalock, flags); 1908 1909 /* Make sure HBA is alive */ 1910 lpfc_issue_hb_tmo(phba); 1911 1912 if (ret_val != WQE_SUCCESS) { 1913 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1914 "6137 Failed abts issue_wqe with status x%x " 1915 "for nvme_fcreq x%px.\n", 1916 ret_val, pnvme_fcreq); 1917 return; 1918 } 1919 1920 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, 1921 "6138 Transport Abort NVME Request Issued for " 1922 "ox_id x%x\n", 1923 nvmereq_wqe->sli4_xritag); 1924 return; 1925 1926 out_unlock: 1927 spin_unlock(&lpfc_nbuf->buf_lock); 1928 spin_unlock_irqrestore(&phba->hbalock, flags); 1929 return; 1930 } 1931 1932 /* Declare and initialization an instance of the FC NVME template. */ 1933 static struct nvme_fc_port_template lpfc_nvme_template = { 1934 /* initiator-based functions */ 1935 .localport_delete = lpfc_nvme_localport_delete, 1936 .remoteport_delete = lpfc_nvme_remoteport_delete, 1937 .create_queue = lpfc_nvme_create_queue, 1938 .delete_queue = lpfc_nvme_delete_queue, 1939 .ls_req = lpfc_nvme_ls_req, 1940 .fcp_io = lpfc_nvme_fcp_io_submit, 1941 .ls_abort = lpfc_nvme_ls_abort, 1942 .fcp_abort = lpfc_nvme_fcp_abort, 1943 .xmt_ls_rsp = lpfc_nvme_xmt_ls_rsp, 1944 1945 .max_hw_queues = 1, 1946 .max_sgl_segments = LPFC_NVME_DEFAULT_SEGS, 1947 .max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS, 1948 .dma_boundary = 0xFFFFFFFF, 1949 1950 /* Sizes of additional private data for data structures. 1951 * No use for the last two sizes at this time. 1952 */ 1953 .local_priv_sz = sizeof(struct lpfc_nvme_lport), 1954 .remote_priv_sz = sizeof(struct lpfc_nvme_rport), 1955 .lsrqst_priv_sz = 0, 1956 .fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv), 1957 }; 1958 1959 /* 1960 * lpfc_get_nvme_buf - Get a nvme buffer from io_buf_list of the HBA 1961 * 1962 * This routine removes a nvme buffer from head of @hdwq io_buf_list 1963 * and returns to caller. 1964 * 1965 * Return codes: 1966 * NULL - Error 1967 * Pointer to lpfc_nvme_buf - Success 1968 **/ 1969 static struct lpfc_io_buf * 1970 lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 1971 int idx, int expedite) 1972 { 1973 struct lpfc_io_buf *lpfc_ncmd; 1974 struct lpfc_sli4_hdw_queue *qp; 1975 struct sli4_sge *sgl; 1976 struct lpfc_iocbq *pwqeq; 1977 union lpfc_wqe128 *wqe; 1978 1979 lpfc_ncmd = lpfc_get_io_buf(phba, NULL, idx, expedite); 1980 1981 if (lpfc_ncmd) { 1982 pwqeq = &(lpfc_ncmd->cur_iocbq); 1983 wqe = &pwqeq->wqe; 1984 1985 /* Setup key fields in buffer that may have been changed 1986 * if other protocols used this buffer. 1987 */ 1988 pwqeq->cmd_flag = LPFC_IO_NVME; 1989 pwqeq->cmd_cmpl = lpfc_nvme_io_cmd_cmpl; 1990 lpfc_ncmd->start_time = jiffies; 1991 lpfc_ncmd->flags = 0; 1992 1993 /* Rsp SGE will be filled in when we rcv an IO 1994 * from the NVME Layer to be sent. 1995 * The cmd is going to be embedded so we need a SKIP SGE. 1996 */ 1997 sgl = lpfc_ncmd->dma_sgl; 1998 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); 1999 bf_set(lpfc_sli4_sge_last, sgl, 0); 2000 sgl->word2 = cpu_to_le32(sgl->word2); 2001 /* Fill in word 3 / sgl_len during cmd submission */ 2002 2003 /* Initialize 64 bytes only */ 2004 memset(wqe, 0, sizeof(union lpfc_wqe)); 2005 2006 if (lpfc_ndlp_check_qdepth(phba, ndlp)) { 2007 atomic_inc(&ndlp->cmd_pending); 2008 lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH; 2009 } 2010 2011 } else { 2012 qp = &phba->sli4_hba.hdwq[idx]; 2013 qp->empty_io_bufs++; 2014 } 2015 2016 return lpfc_ncmd; 2017 } 2018 2019 /** 2020 * lpfc_release_nvme_buf: Return a nvme buffer back to hba nvme buf list. 2021 * @phba: The Hba for which this call is being executed. 2022 * @lpfc_ncmd: The nvme buffer which is being released. 2023 * 2024 * This routine releases @lpfc_ncmd nvme buffer by adding it to tail of @phba 2025 * lpfc_io_buf_list list. For SLI4 XRI's are tied to the nvme buffer 2026 * and cannot be reused for at least RA_TOV amount of time if it was 2027 * aborted. 2028 **/ 2029 static void 2030 lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd) 2031 { 2032 struct lpfc_sli4_hdw_queue *qp; 2033 unsigned long iflag = 0; 2034 2035 if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp) 2036 atomic_dec(&lpfc_ncmd->ndlp->cmd_pending); 2037 2038 lpfc_ncmd->ndlp = NULL; 2039 lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH; 2040 2041 qp = lpfc_ncmd->hdwq; 2042 if (unlikely(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) { 2043 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2044 "6310 XB release deferred for " 2045 "ox_id x%x on reqtag x%x\n", 2046 lpfc_ncmd->cur_iocbq.sli4_xritag, 2047 lpfc_ncmd->cur_iocbq.iotag); 2048 2049 spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag); 2050 list_add_tail(&lpfc_ncmd->list, 2051 &qp->lpfc_abts_io_buf_list); 2052 qp->abts_nvme_io_bufs++; 2053 spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag); 2054 } else 2055 lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp); 2056 } 2057 2058 /** 2059 * lpfc_nvme_create_localport - Create/Bind an nvme localport instance. 2060 * @vport: the lpfc_vport instance requesting a localport. 2061 * 2062 * This routine is invoked to create an nvme localport instance to bind 2063 * to the nvme_fc_transport. It is called once during driver load 2064 * like lpfc_create_shost after all other services are initialized. 2065 * It requires a vport, vpi, and wwns at call time. Other localport 2066 * parameters are modified as the driver's FCID and the Fabric WWN 2067 * are established. 2068 * 2069 * Return codes 2070 * 0 - successful 2071 * -ENOMEM - no heap memory available 2072 * other values - from nvme registration upcall 2073 **/ 2074 int 2075 lpfc_nvme_create_localport(struct lpfc_vport *vport) 2076 { 2077 int ret = 0; 2078 struct lpfc_hba *phba = vport->phba; 2079 struct nvme_fc_port_info nfcp_info; 2080 struct nvme_fc_local_port *localport; 2081 struct lpfc_nvme_lport *lport; 2082 2083 /* Initialize this localport instance. The vport wwn usage ensures 2084 * that NPIV is accounted for. 2085 */ 2086 memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info)); 2087 nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR; 2088 nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn); 2089 nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn); 2090 2091 /* We need to tell the transport layer + 1 because it takes page 2092 * alignment into account. When space for the SGL is allocated we 2093 * allocate + 3, one for cmd, one for rsp and one for this alignment 2094 */ 2095 lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; 2096 2097 /* Advertise how many hw queues we support based on cfg_hdw_queue, 2098 * which will not exceed cpu count. 2099 */ 2100 lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue; 2101 2102 if (!IS_ENABLED(CONFIG_NVME_FC)) 2103 return ret; 2104 2105 /* localport is allocated from the stack, but the registration 2106 * call allocates heap memory as well as the private area. 2107 */ 2108 2109 ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template, 2110 &vport->phba->pcidev->dev, &localport); 2111 if (!ret) { 2112 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC, 2113 "6005 Successfully registered local " 2114 "NVME port num %d, localP x%px, private " 2115 "x%px, sg_seg %d\n", 2116 localport->port_num, localport, 2117 localport->private, 2118 lpfc_nvme_template.max_sgl_segments); 2119 2120 /* Private is our lport size declared in the template. */ 2121 lport = (struct lpfc_nvme_lport *)localport->private; 2122 vport->localport = localport; 2123 lport->vport = vport; 2124 vport->nvmei_support = 1; 2125 2126 atomic_set(&lport->xmt_fcp_noxri, 0); 2127 atomic_set(&lport->xmt_fcp_bad_ndlp, 0); 2128 atomic_set(&lport->xmt_fcp_qdepth, 0); 2129 atomic_set(&lport->xmt_fcp_err, 0); 2130 atomic_set(&lport->xmt_fcp_wqerr, 0); 2131 atomic_set(&lport->xmt_fcp_abort, 0); 2132 atomic_set(&lport->xmt_ls_abort, 0); 2133 atomic_set(&lport->xmt_ls_err, 0); 2134 atomic_set(&lport->cmpl_fcp_xb, 0); 2135 atomic_set(&lport->cmpl_fcp_err, 0); 2136 atomic_set(&lport->cmpl_ls_xb, 0); 2137 atomic_set(&lport->cmpl_ls_err, 0); 2138 2139 atomic_set(&lport->fc4NvmeLsRequests, 0); 2140 atomic_set(&lport->fc4NvmeLsCmpls, 0); 2141 } 2142 2143 return ret; 2144 } 2145 2146 #if (IS_ENABLED(CONFIG_NVME_FC)) 2147 /* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg. 2148 * 2149 * The driver has to wait for the host nvme transport to callback 2150 * indicating the localport has successfully unregistered all 2151 * resources. Since this is an uninterruptible wait, loop every ten 2152 * seconds and print a message indicating no progress. 2153 * 2154 * An uninterruptible wait is used because of the risk of transport-to- 2155 * driver state mismatch. 2156 */ 2157 static void 2158 lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, 2159 struct lpfc_nvme_lport *lport, 2160 struct completion *lport_unreg_cmp) 2161 { 2162 u32 wait_tmo; 2163 int ret, i, pending = 0; 2164 struct lpfc_sli_ring *pring; 2165 struct lpfc_hba *phba = vport->phba; 2166 struct lpfc_sli4_hdw_queue *qp; 2167 int abts_scsi, abts_nvme; 2168 2169 /* Host transport has to clean up and confirm requiring an indefinite 2170 * wait. Print a message if a 10 second wait expires and renew the 2171 * wait. This is unexpected. 2172 */ 2173 wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000); 2174 while (true) { 2175 ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo); 2176 if (unlikely(!ret)) { 2177 pending = 0; 2178 abts_scsi = 0; 2179 abts_nvme = 0; 2180 for (i = 0; i < phba->cfg_hdw_queue; i++) { 2181 qp = &phba->sli4_hba.hdwq[i]; 2182 if (!vport->localport || !qp || !qp->io_wq) 2183 return; 2184 2185 pring = qp->io_wq->pring; 2186 if (!pring) 2187 continue; 2188 pending += pring->txcmplq_cnt; 2189 abts_scsi += qp->abts_scsi_io_bufs; 2190 abts_nvme += qp->abts_nvme_io_bufs; 2191 } 2192 if (!vport->localport || 2193 test_bit(HBA_PCI_ERR, &vport->phba->bit_flags) || 2194 vport->load_flag & FC_UNLOADING) 2195 return; 2196 2197 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2198 "6176 Lport x%px Localport x%px wait " 2199 "timed out. Pending %d [%d:%d]. " 2200 "Renewing.\n", 2201 lport, vport->localport, pending, 2202 abts_scsi, abts_nvme); 2203 continue; 2204 } 2205 break; 2206 } 2207 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 2208 "6177 Lport x%px Localport x%px Complete Success\n", 2209 lport, vport->localport); 2210 } 2211 #endif 2212 2213 /** 2214 * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport. 2215 * @vport: pointer to a host virtual N_Port data structure 2216 * 2217 * This routine is invoked to destroy all lports bound to the phba. 2218 * The lport memory was allocated by the nvme fc transport and is 2219 * released there. This routine ensures all rports bound to the 2220 * lport have been disconnected. 2221 * 2222 **/ 2223 void 2224 lpfc_nvme_destroy_localport(struct lpfc_vport *vport) 2225 { 2226 #if (IS_ENABLED(CONFIG_NVME_FC)) 2227 struct nvme_fc_local_port *localport; 2228 struct lpfc_nvme_lport *lport; 2229 int ret; 2230 DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp); 2231 2232 if (vport->nvmei_support == 0) 2233 return; 2234 2235 localport = vport->localport; 2236 if (!localport) 2237 return; 2238 lport = (struct lpfc_nvme_lport *)localport->private; 2239 2240 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, 2241 "6011 Destroying NVME localport x%px\n", 2242 localport); 2243 2244 /* lport's rport list is clear. Unregister 2245 * lport and release resources. 2246 */ 2247 lport->lport_unreg_cmp = &lport_unreg_cmp; 2248 ret = nvme_fc_unregister_localport(localport); 2249 2250 /* Wait for completion. This either blocks 2251 * indefinitely or succeeds 2252 */ 2253 lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp); 2254 vport->localport = NULL; 2255 2256 /* Regardless of the unregister upcall response, clear 2257 * nvmei_support. All rports are unregistered and the 2258 * driver will clean up. 2259 */ 2260 vport->nvmei_support = 0; 2261 if (ret == 0) { 2262 lpfc_printf_vlog(vport, 2263 KERN_INFO, LOG_NVME_DISC, 2264 "6009 Unregistered lport Success\n"); 2265 } else { 2266 lpfc_printf_vlog(vport, 2267 KERN_INFO, LOG_NVME_DISC, 2268 "6010 Unregistered lport " 2269 "Failed, status x%x\n", 2270 ret); 2271 } 2272 #endif 2273 } 2274 2275 void 2276 lpfc_nvme_update_localport(struct lpfc_vport *vport) 2277 { 2278 #if (IS_ENABLED(CONFIG_NVME_FC)) 2279 struct nvme_fc_local_port *localport; 2280 struct lpfc_nvme_lport *lport; 2281 2282 localport = vport->localport; 2283 if (!localport) { 2284 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME, 2285 "6710 Update NVME fail. No localport\n"); 2286 return; 2287 } 2288 lport = (struct lpfc_nvme_lport *)localport->private; 2289 if (!lport) { 2290 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME, 2291 "6171 Update NVME fail. localP x%px, No lport\n", 2292 localport); 2293 return; 2294 } 2295 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, 2296 "6012 Update NVME lport x%px did x%x\n", 2297 localport, vport->fc_myDID); 2298 2299 localport->port_id = vport->fc_myDID; 2300 if (localport->port_id == 0) 2301 localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY; 2302 else 2303 localport->port_role = FC_PORT_ROLE_NVME_INITIATOR; 2304 2305 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 2306 "6030 bound lport x%px to DID x%06x\n", 2307 lport, localport->port_id); 2308 #endif 2309 } 2310 2311 int 2312 lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 2313 { 2314 #if (IS_ENABLED(CONFIG_NVME_FC)) 2315 int ret = 0; 2316 struct nvme_fc_local_port *localport; 2317 struct lpfc_nvme_lport *lport; 2318 struct lpfc_nvme_rport *rport; 2319 struct lpfc_nvme_rport *oldrport; 2320 struct nvme_fc_remote_port *remote_port; 2321 struct nvme_fc_port_info rpinfo; 2322 struct lpfc_nodelist *prev_ndlp = NULL; 2323 struct fc_rport *srport = ndlp->rport; 2324 2325 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC, 2326 "6006 Register NVME PORT. DID x%06x nlptype x%x\n", 2327 ndlp->nlp_DID, ndlp->nlp_type); 2328 2329 localport = vport->localport; 2330 if (!localport) 2331 return 0; 2332 2333 lport = (struct lpfc_nvme_lport *)localport->private; 2334 2335 /* NVME rports are not preserved across devloss. 2336 * Just register this instance. Note, rpinfo->dev_loss_tmo 2337 * is left 0 to indicate accept transport defaults. The 2338 * driver communicates port role capabilities consistent 2339 * with the PRLI response data. 2340 */ 2341 memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info)); 2342 rpinfo.port_id = ndlp->nlp_DID; 2343 if (ndlp->nlp_type & NLP_NVME_TARGET) 2344 rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET; 2345 if (ndlp->nlp_type & NLP_NVME_INITIATOR) 2346 rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR; 2347 2348 if (ndlp->nlp_type & NLP_NVME_DISCOVERY) 2349 rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY; 2350 2351 rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); 2352 rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); 2353 if (srport) 2354 rpinfo.dev_loss_tmo = srport->dev_loss_tmo; 2355 else 2356 rpinfo.dev_loss_tmo = vport->cfg_devloss_tmo; 2357 2358 spin_lock_irq(&ndlp->lock); 2359 oldrport = lpfc_ndlp_get_nrport(ndlp); 2360 if (oldrport) { 2361 prev_ndlp = oldrport->ndlp; 2362 spin_unlock_irq(&ndlp->lock); 2363 } else { 2364 spin_unlock_irq(&ndlp->lock); 2365 if (!lpfc_nlp_get(ndlp)) { 2366 dev_warn(&vport->phba->pcidev->dev, 2367 "Warning - No node ref - exit register\n"); 2368 return 0; 2369 } 2370 } 2371 2372 ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port); 2373 if (!ret) { 2374 /* If the ndlp already has an nrport, this is just 2375 * a resume of the existing rport. Else this is a 2376 * new rport. 2377 */ 2378 /* Guard against an unregister/reregister 2379 * race that leaves the WAIT flag set. 2380 */ 2381 spin_lock_irq(&ndlp->lock); 2382 ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT; 2383 ndlp->fc4_xpt_flags |= NVME_XPT_REGD; 2384 spin_unlock_irq(&ndlp->lock); 2385 rport = remote_port->private; 2386 if (oldrport) { 2387 2388 /* Sever the ndlp<->rport association 2389 * before dropping the ndlp ref from 2390 * register. 2391 */ 2392 spin_lock_irq(&ndlp->lock); 2393 ndlp->nrport = NULL; 2394 ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT; 2395 spin_unlock_irq(&ndlp->lock); 2396 rport->ndlp = NULL; 2397 rport->remoteport = NULL; 2398 2399 /* Reference only removed if previous NDLP is no longer 2400 * active. It might be just a swap and removing the 2401 * reference would cause a premature cleanup. 2402 */ 2403 if (prev_ndlp && prev_ndlp != ndlp) { 2404 if (!prev_ndlp->nrport) 2405 lpfc_nlp_put(prev_ndlp); 2406 } 2407 } 2408 2409 /* Clean bind the rport to the ndlp. */ 2410 rport->remoteport = remote_port; 2411 rport->lport = lport; 2412 rport->ndlp = ndlp; 2413 spin_lock_irq(&ndlp->lock); 2414 ndlp->nrport = rport; 2415 spin_unlock_irq(&ndlp->lock); 2416 lpfc_printf_vlog(vport, KERN_INFO, 2417 LOG_NVME_DISC | LOG_NODE, 2418 "6022 Bind lport x%px to remoteport x%px " 2419 "rport x%px WWNN 0x%llx, " 2420 "Rport WWPN 0x%llx DID " 2421 "x%06x Role x%x, ndlp %p prev_ndlp x%px\n", 2422 lport, remote_port, rport, 2423 rpinfo.node_name, rpinfo.port_name, 2424 rpinfo.port_id, rpinfo.port_role, 2425 ndlp, prev_ndlp); 2426 } else { 2427 lpfc_printf_vlog(vport, KERN_ERR, 2428 LOG_TRACE_EVENT, 2429 "6031 RemotePort Registration failed " 2430 "err: %d, DID x%06x\n", 2431 ret, ndlp->nlp_DID); 2432 } 2433 2434 return ret; 2435 #else 2436 return 0; 2437 #endif 2438 } 2439 2440 /* 2441 * lpfc_nvme_rescan_port - Check to see if we should rescan this remoteport 2442 * 2443 * If the ndlp represents an NVME Target, that we are logged into, 2444 * ping the NVME FC Transport layer to initiate a device rescan 2445 * on this remote NPort. 2446 */ 2447 void 2448 lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 2449 { 2450 #if (IS_ENABLED(CONFIG_NVME_FC)) 2451 struct lpfc_nvme_rport *nrport; 2452 struct nvme_fc_remote_port *remoteport = NULL; 2453 2454 spin_lock_irq(&ndlp->lock); 2455 nrport = lpfc_ndlp_get_nrport(ndlp); 2456 if (nrport) 2457 remoteport = nrport->remoteport; 2458 spin_unlock_irq(&ndlp->lock); 2459 2460 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 2461 "6170 Rescan NPort DID x%06x type x%x " 2462 "state x%x nrport x%px remoteport x%px\n", 2463 ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state, 2464 nrport, remoteport); 2465 2466 if (!nrport || !remoteport) 2467 goto rescan_exit; 2468 2469 /* Only rescan if we are an NVME target in the MAPPED state */ 2470 if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY && 2471 ndlp->nlp_state == NLP_STE_MAPPED_NODE) { 2472 nvme_fc_rescan_remoteport(remoteport); 2473 2474 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2475 "6172 NVME rescanned DID x%06x " 2476 "port_state x%x\n", 2477 ndlp->nlp_DID, remoteport->port_state); 2478 } 2479 return; 2480 rescan_exit: 2481 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 2482 "6169 Skip NVME Rport Rescan, NVME remoteport " 2483 "unregistered\n"); 2484 #endif 2485 } 2486 2487 /* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport. 2488 * 2489 * There is no notion of Devloss or rport recovery from the current 2490 * nvme_transport perspective. Loss of an rport just means IO cannot 2491 * be sent and recovery is completely up to the initator. 2492 * For now, the driver just unbinds the DID and port_role so that 2493 * no further IO can be issued. Changes are planned for later. 2494 * 2495 * Notes - the ndlp reference count is not decremented here since 2496 * since there is no nvme_transport api for devloss. Node ref count 2497 * is only adjusted in driver unload. 2498 */ 2499 void 2500 lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 2501 { 2502 #if (IS_ENABLED(CONFIG_NVME_FC)) 2503 int ret; 2504 struct nvme_fc_local_port *localport; 2505 struct lpfc_nvme_lport *lport; 2506 struct lpfc_nvme_rport *rport; 2507 struct nvme_fc_remote_port *remoteport = NULL; 2508 2509 localport = vport->localport; 2510 2511 /* This is fundamental error. The localport is always 2512 * available until driver unload. Just exit. 2513 */ 2514 if (!localport) 2515 return; 2516 2517 lport = (struct lpfc_nvme_lport *)localport->private; 2518 if (!lport) 2519 goto input_err; 2520 2521 spin_lock_irq(&ndlp->lock); 2522 rport = lpfc_ndlp_get_nrport(ndlp); 2523 if (rport) 2524 remoteport = rport->remoteport; 2525 spin_unlock_irq(&ndlp->lock); 2526 if (!remoteport) 2527 goto input_err; 2528 2529 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 2530 "6033 Unreg nvme remoteport x%px, portname x%llx, " 2531 "port_id x%06x, portstate x%x port type x%x " 2532 "refcnt %d\n", 2533 remoteport, remoteport->port_name, 2534 remoteport->port_id, remoteport->port_state, 2535 ndlp->nlp_type, kref_read(&ndlp->kref)); 2536 2537 /* Sanity check ndlp type. Only call for NVME ports. Don't 2538 * clear any rport state until the transport calls back. 2539 */ 2540 2541 if (ndlp->nlp_type & NLP_NVME_TARGET) { 2542 /* No concern about the role change on the nvme remoteport. 2543 * The transport will update it. 2544 */ 2545 spin_lock_irq(&vport->phba->hbalock); 2546 ndlp->fc4_xpt_flags |= NVME_XPT_UNREG_WAIT; 2547 spin_unlock_irq(&vport->phba->hbalock); 2548 2549 /* Don't let the host nvme transport keep sending keep-alives 2550 * on this remoteport. Vport is unloading, no recovery. The 2551 * return values is ignored. The upcall is a courtesy to the 2552 * transport. 2553 */ 2554 if (vport->load_flag & FC_UNLOADING) 2555 (void)nvme_fc_set_remoteport_devloss(remoteport, 0); 2556 2557 ret = nvme_fc_unregister_remoteport(remoteport); 2558 2559 /* The driver no longer knows if the nrport memory is valid. 2560 * because the controller teardown process has begun and 2561 * is asynchronous. Break the binding in the ndlp. Also 2562 * remove the register ndlp reference to setup node release. 2563 */ 2564 ndlp->nrport = NULL; 2565 lpfc_nlp_put(ndlp); 2566 if (ret != 0) { 2567 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2568 "6167 NVME unregister failed %d " 2569 "port_state x%x\n", 2570 ret, remoteport->port_state); 2571 } 2572 } 2573 return; 2574 2575 input_err: 2576 #endif 2577 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2578 "6168 State error: lport x%px, rport x%px FCID x%06x\n", 2579 vport->localport, ndlp->rport, ndlp->nlp_DID); 2580 } 2581 2582 /** 2583 * lpfc_sli4_nvme_pci_offline_aborted - Fast-path process of NVME xri abort 2584 * @phba: pointer to lpfc hba data structure. 2585 * @lpfc_ncmd: The nvme job structure for the request being aborted. 2586 * 2587 * This routine is invoked by the worker thread to process a SLI4 fast-path 2588 * NVME aborted xri. Aborted NVME IO commands are completed to the transport 2589 * here. 2590 **/ 2591 void 2592 lpfc_sli4_nvme_pci_offline_aborted(struct lpfc_hba *phba, 2593 struct lpfc_io_buf *lpfc_ncmd) 2594 { 2595 struct nvmefc_fcp_req *nvme_cmd = NULL; 2596 2597 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2598 "6533 %s nvme_cmd %p tag x%x abort complete and " 2599 "xri released\n", __func__, 2600 lpfc_ncmd->nvmeCmd, 2601 lpfc_ncmd->cur_iocbq.iotag); 2602 2603 /* Aborted NVME commands are required to not complete 2604 * before the abort exchange command fully completes. 2605 * Once completed, it is available via the put list. 2606 */ 2607 if (lpfc_ncmd->nvmeCmd) { 2608 nvme_cmd = lpfc_ncmd->nvmeCmd; 2609 nvme_cmd->transferred_length = 0; 2610 nvme_cmd->rcv_rsplen = 0; 2611 nvme_cmd->status = NVME_SC_INTERNAL; 2612 nvme_cmd->done(nvme_cmd); 2613 lpfc_ncmd->nvmeCmd = NULL; 2614 } 2615 lpfc_release_nvme_buf(phba, lpfc_ncmd); 2616 } 2617 2618 /** 2619 * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort 2620 * @phba: pointer to lpfc hba data structure. 2621 * @axri: pointer to the fcp xri abort wcqe structure. 2622 * @lpfc_ncmd: The nvme job structure for the request being aborted. 2623 * 2624 * This routine is invoked by the worker thread to process a SLI4 fast-path 2625 * NVME aborted xri. Aborted NVME IO commands are completed to the transport 2626 * here. 2627 **/ 2628 void 2629 lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba, 2630 struct sli4_wcqe_xri_aborted *axri, 2631 struct lpfc_io_buf *lpfc_ncmd) 2632 { 2633 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 2634 struct nvmefc_fcp_req *nvme_cmd = NULL; 2635 struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp; 2636 2637 2638 if (ndlp) 2639 lpfc_sli4_abts_err_handler(phba, ndlp, axri); 2640 2641 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2642 "6311 nvme_cmd %p xri x%x tag x%x abort complete and " 2643 "xri released\n", 2644 lpfc_ncmd->nvmeCmd, xri, 2645 lpfc_ncmd->cur_iocbq.iotag); 2646 2647 /* Aborted NVME commands are required to not complete 2648 * before the abort exchange command fully completes. 2649 * Once completed, it is available via the put list. 2650 */ 2651 if (lpfc_ncmd->nvmeCmd) { 2652 nvme_cmd = lpfc_ncmd->nvmeCmd; 2653 nvme_cmd->done(nvme_cmd); 2654 lpfc_ncmd->nvmeCmd = NULL; 2655 } 2656 lpfc_release_nvme_buf(phba, lpfc_ncmd); 2657 } 2658 2659 /** 2660 * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete 2661 * @phba: Pointer to HBA context object. 2662 * 2663 * This function flushes all wqes in the nvme rings and frees all resources 2664 * in the txcmplq. This function does not issue abort wqes for the IO 2665 * commands in txcmplq, they will just be returned with 2666 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI 2667 * slot has been permanently disabled. 2668 **/ 2669 void 2670 lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba) 2671 { 2672 struct lpfc_sli_ring *pring; 2673 u32 i, wait_cnt = 0; 2674 2675 if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq) 2676 return; 2677 2678 /* Cycle through all IO rings and make sure all outstanding 2679 * WQEs have been removed from the txcmplqs. 2680 */ 2681 for (i = 0; i < phba->cfg_hdw_queue; i++) { 2682 if (!phba->sli4_hba.hdwq[i].io_wq) 2683 continue; 2684 pring = phba->sli4_hba.hdwq[i].io_wq->pring; 2685 2686 if (!pring) 2687 continue; 2688 2689 /* Retrieve everything on the txcmplq */ 2690 while (!list_empty(&pring->txcmplq)) { 2691 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); 2692 wait_cnt++; 2693 2694 /* The sleep is 10mS. Every ten seconds, 2695 * dump a message. Something is wrong. 2696 */ 2697 if ((wait_cnt % 1000) == 0) { 2698 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2699 "6178 NVME IO not empty, " 2700 "cnt %d\n", wait_cnt); 2701 } 2702 } 2703 } 2704 2705 /* Make sure HBA is alive */ 2706 lpfc_issue_hb_tmo(phba); 2707 2708 } 2709 2710 void 2711 lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, 2712 uint32_t stat, uint32_t param) 2713 { 2714 #if (IS_ENABLED(CONFIG_NVME_FC)) 2715 struct lpfc_io_buf *lpfc_ncmd; 2716 struct nvmefc_fcp_req *nCmd; 2717 struct lpfc_wcqe_complete wcqe; 2718 struct lpfc_wcqe_complete *wcqep = &wcqe; 2719 2720 lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1; 2721 if (!lpfc_ncmd) { 2722 lpfc_sli_release_iocbq(phba, pwqeIn); 2723 return; 2724 } 2725 /* For abort iocb just return, IO iocb will do a done call */ 2726 if (bf_get(wqe_cmnd, &pwqeIn->wqe.gen_req.wqe_com) == 2727 CMD_ABORT_XRI_CX) { 2728 lpfc_sli_release_iocbq(phba, pwqeIn); 2729 return; 2730 } 2731 2732 spin_lock(&lpfc_ncmd->buf_lock); 2733 nCmd = lpfc_ncmd->nvmeCmd; 2734 if (!nCmd) { 2735 spin_unlock(&lpfc_ncmd->buf_lock); 2736 lpfc_release_nvme_buf(phba, lpfc_ncmd); 2737 return; 2738 } 2739 spin_unlock(&lpfc_ncmd->buf_lock); 2740 2741 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, 2742 "6194 NVME Cancel xri %x\n", 2743 lpfc_ncmd->cur_iocbq.sli4_xritag); 2744 2745 wcqep->word0 = 0; 2746 bf_set(lpfc_wcqe_c_status, wcqep, stat); 2747 wcqep->parameter = param; 2748 wcqep->word3 = 0; /* xb is 0 */ 2749 2750 /* Call release with XB=1 to queue the IO into the abort list. */ 2751 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 2752 bf_set(lpfc_wcqe_c_xb, wcqep, 1); 2753 2754 memcpy(&pwqeIn->wcqe_cmpl, wcqep, sizeof(*wcqep)); 2755 (pwqeIn->cmd_cmpl)(phba, pwqeIn, pwqeIn); 2756 #endif 2757 } 2758