1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 ********************************************************************/ 23 #include <linux/pci.h> 24 #include <linux/slab.h> 25 #include <linux/interrupt.h> 26 #include <linux/delay.h> 27 #include <asm/unaligned.h> 28 #include <linux/crc-t10dif.h> 29 #include <net/checksum.h> 30 31 #include <scsi/scsi.h> 32 #include <scsi/scsi_device.h> 33 #include <scsi/scsi_eh.h> 34 #include <scsi/scsi_host.h> 35 #include <scsi/scsi_tcq.h> 36 #include <scsi/scsi_transport_fc.h> 37 #include <scsi/fc/fc_fs.h> 38 39 #include "lpfc_version.h" 40 #include "lpfc_hw4.h" 41 #include "lpfc_hw.h" 42 #include "lpfc_sli.h" 43 #include "lpfc_sli4.h" 44 #include "lpfc_nl.h" 45 #include "lpfc_disc.h" 46 #include "lpfc.h" 47 #include "lpfc_nvme.h" 48 #include "lpfc_scsi.h" 49 #include "lpfc_logmsg.h" 50 #include "lpfc_crtn.h" 51 #include "lpfc_vport.h" 52 #include "lpfc_debugfs.h" 53 54 /* NVME initiator-based functions */ 55 56 static struct lpfc_io_buf * 57 lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 58 int idx, int expedite); 59 60 static void 61 lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *); 62 63 static struct nvme_fc_port_template lpfc_nvme_template; 64 65 /** 66 * lpfc_nvme_create_queue - 67 * @pnvme_lport: Transport localport that LS is to be issued from 68 * @qidx: An cpu index used to affinitize IO queues and MSIX vectors. 69 * @qsize: Size of the queue in bytes 70 * @handle: An opaque driver handle used in follow-up calls. 71 * 72 * Driver registers this routine to preallocate and initialize any 73 * internal data structures to bind the @qidx to its internal IO queues. 74 * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ. 75 * 76 * Return value : 77 * 0 - Success 78 * -EINVAL - Unsupported input value. 79 * -ENOMEM - Could not alloc necessary memory 80 **/ 81 static int 82 lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport, 83 unsigned int qidx, u16 qsize, 84 void **handle) 85 { 86 struct lpfc_nvme_lport *lport; 87 struct lpfc_vport *vport; 88 struct lpfc_nvme_qhandle *qhandle; 89 char *str; 90 91 if (!pnvme_lport->private) 92 return -ENOMEM; 93 94 lport = (struct lpfc_nvme_lport *)pnvme_lport->private; 95 vport = lport->vport; 96 97 if (!vport || vport->load_flag & FC_UNLOADING || 98 vport->phba->hba_flag & HBA_IOQ_FLUSH) 99 return -ENODEV; 100 101 qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL); 102 if (qhandle == NULL) 103 return -ENOMEM; 104 105 qhandle->cpu_id = raw_smp_processor_id(); 106 qhandle->qidx = qidx; 107 /* 108 * NVME qidx == 0 is the admin queue, so both admin queue 109 * and first IO queue will use MSI-X vector and associated 110 * EQ/CQ/WQ at index 0. After that they are sequentially assigned. 111 */ 112 if (qidx) { 113 str = "IO "; /* IO queue */ 114 qhandle->index = ((qidx - 1) % 115 lpfc_nvme_template.max_hw_queues); 116 } else { 117 str = "ADM"; /* Admin queue */ 118 qhandle->index = qidx; 119 } 120 121 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, 122 "6073 Binding %s HdwQueue %d (cpu %d) to " 123 "hdw_queue %d qhandle x%px\n", str, 124 qidx, qhandle->cpu_id, qhandle->index, qhandle); 125 *handle = (void *)qhandle; 126 return 0; 127 } 128 129 /** 130 * lpfc_nvme_delete_queue - 131 * @pnvme_lport: Transport localport that LS is to be issued from 132 * @qidx: An cpu index used to affinitize IO queues and MSIX vectors. 133 * @handle: An opaque driver handle from lpfc_nvme_create_queue 134 * 135 * Driver registers this routine to free 136 * any internal data structures to bind the @qidx to its internal 137 * IO queues. 138 * 139 * Return value : 140 * 0 - Success 141 * TODO: What are the failure codes. 142 **/ 143 static void 144 lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport, 145 unsigned int qidx, 146 void *handle) 147 { 148 struct lpfc_nvme_lport *lport; 149 struct lpfc_vport *vport; 150 151 if (!pnvme_lport->private) 152 return; 153 154 lport = (struct lpfc_nvme_lport *)pnvme_lport->private; 155 vport = lport->vport; 156 157 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, 158 "6001 ENTER. lpfc_pnvme x%px, qidx x%x qhandle x%px\n", 159 lport, qidx, handle); 160 kfree(handle); 161 } 162 163 static void 164 lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport) 165 { 166 struct lpfc_nvme_lport *lport = localport->private; 167 168 lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME, 169 "6173 localport x%px delete complete\n", 170 lport); 171 172 /* release any threads waiting for the unreg to complete */ 173 if (lport->vport->localport) 174 complete(lport->lport_unreg_cmp); 175 } 176 177 /* lpfc_nvme_remoteport_delete 178 * 179 * @remoteport: Pointer to an nvme transport remoteport instance. 180 * 181 * This is a template downcall. NVME transport calls this function 182 * when it has completed the unregistration of a previously 183 * registered remoteport. 184 * 185 * Return value : 186 * None 187 */ 188 static void 189 lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport) 190 { 191 struct lpfc_nvme_rport *rport = remoteport->private; 192 struct lpfc_vport *vport; 193 struct lpfc_nodelist *ndlp; 194 u32 fc4_xpt_flags; 195 196 ndlp = rport->ndlp; 197 if (!ndlp) { 198 pr_err("**** %s: NULL ndlp on rport x%px remoteport x%px\n", 199 __func__, rport, remoteport); 200 goto rport_err; 201 } 202 203 vport = ndlp->vport; 204 if (!vport) { 205 pr_err("**** %s: Null vport on ndlp x%px, ste x%x rport x%px\n", 206 __func__, ndlp, ndlp->nlp_state, rport); 207 goto rport_err; 208 } 209 210 fc4_xpt_flags = NVME_XPT_REGD | SCSI_XPT_REGD; 211 212 /* Remove this rport from the lport's list - memory is owned by the 213 * transport. Remove the ndlp reference for the NVME transport before 214 * calling state machine to remove the node. 215 */ 216 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 217 "6146 remoteport delete of remoteport x%px, ndlp x%px " 218 "DID x%x xflags x%x\n", 219 remoteport, ndlp, ndlp->nlp_DID, ndlp->fc4_xpt_flags); 220 spin_lock_irq(&ndlp->lock); 221 222 /* The register rebind might have occurred before the delete 223 * downcall. Guard against this race. 224 */ 225 if (ndlp->fc4_xpt_flags & NVME_XPT_UNREG_WAIT) 226 ndlp->fc4_xpt_flags &= ~(NVME_XPT_UNREG_WAIT | NVME_XPT_REGD); 227 228 spin_unlock_irq(&ndlp->lock); 229 230 /* On a devloss timeout event, one more put is executed provided the 231 * NVME and SCSI rport unregister requests are complete. If the vport 232 * is unloading, this extra put is executed by lpfc_drop_node. 233 */ 234 if (!(ndlp->fc4_xpt_flags & fc4_xpt_flags)) 235 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 236 237 rport_err: 238 return; 239 } 240 241 /** 242 * lpfc_nvme_handle_lsreq - Process an unsolicited NVME LS request 243 * @phba: pointer to lpfc hba data structure. 244 * @axchg: pointer to exchange context for the NVME LS request 245 * 246 * This routine is used for processing an asychronously received NVME LS 247 * request. Any remaining validation is done and the LS is then forwarded 248 * to the nvme-fc transport via nvme_fc_rcv_ls_req(). 249 * 250 * The calling sequence should be: nvme_fc_rcv_ls_req() -> (processing) 251 * -> lpfc_nvme_xmt_ls_rsp/cmp -> req->done. 252 * __lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg. 253 * 254 * Returns 0 if LS was handled and delivered to the transport 255 * Returns 1 if LS failed to be handled and should be dropped 256 */ 257 int 258 lpfc_nvme_handle_lsreq(struct lpfc_hba *phba, 259 struct lpfc_async_xchg_ctx *axchg) 260 { 261 #if (IS_ENABLED(CONFIG_NVME_FC)) 262 struct lpfc_vport *vport; 263 struct lpfc_nvme_rport *lpfc_rport; 264 struct nvme_fc_remote_port *remoteport; 265 struct lpfc_nvme_lport *lport; 266 uint32_t *payload = axchg->payload; 267 int rc; 268 269 vport = axchg->ndlp->vport; 270 lpfc_rport = axchg->ndlp->nrport; 271 if (!lpfc_rport) 272 return -EINVAL; 273 274 remoteport = lpfc_rport->remoteport; 275 if (!vport->localport || 276 vport->phba->hba_flag & HBA_IOQ_FLUSH) 277 return -EINVAL; 278 279 lport = vport->localport->private; 280 if (!lport) 281 return -EINVAL; 282 283 rc = nvme_fc_rcv_ls_req(remoteport, &axchg->ls_rsp, axchg->payload, 284 axchg->size); 285 286 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, 287 "6205 NVME Unsol rcv: sz %d rc %d: %08x %08x %08x " 288 "%08x %08x %08x\n", 289 axchg->size, rc, 290 *payload, *(payload+1), *(payload+2), 291 *(payload+3), *(payload+4), *(payload+5)); 292 293 if (!rc) 294 return 0; 295 #endif 296 return 1; 297 } 298 299 /** 300 * __lpfc_nvme_ls_req_cmp - Generic completion handler for a NVME 301 * LS request. 302 * @phba: Pointer to HBA context object 303 * @vport: The local port that issued the LS 304 * @cmdwqe: Pointer to driver command WQE object. 305 * @wcqe: Pointer to driver response CQE object. 306 * 307 * This function is the generic completion handler for NVME LS requests. 308 * The function updates any states and statistics, calls the transport 309 * ls_req done() routine, then tears down the command and buffers used 310 * for the LS request. 311 **/ 312 void 313 __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport, 314 struct lpfc_iocbq *cmdwqe, 315 struct lpfc_wcqe_complete *wcqe) 316 { 317 struct nvmefc_ls_req *pnvme_lsreq; 318 struct lpfc_dmabuf *buf_ptr; 319 struct lpfc_nodelist *ndlp; 320 uint32_t status; 321 322 pnvme_lsreq = cmdwqe->context_un.nvme_lsreq; 323 ndlp = cmdwqe->ndlp; 324 buf_ptr = cmdwqe->bpl_dmabuf; 325 326 status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK; 327 328 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 329 "6047 NVMEx LS REQ x%px cmpl DID %x Xri: %x " 330 "status %x reason x%x cmd:x%px lsreg:x%px bmp:x%px " 331 "ndlp:x%px\n", 332 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0, 333 cmdwqe->sli4_xritag, status, 334 (wcqe->parameter & 0xffff), 335 cmdwqe, pnvme_lsreq, cmdwqe->bpl_dmabuf, 336 ndlp); 337 338 lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x parm x%x\n", 339 cmdwqe->sli4_xritag, status, wcqe->parameter); 340 341 if (buf_ptr) { 342 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 343 kfree(buf_ptr); 344 cmdwqe->bpl_dmabuf = NULL; 345 } 346 if (pnvme_lsreq->done) 347 pnvme_lsreq->done(pnvme_lsreq, status); 348 else 349 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 350 "6046 NVMEx cmpl without done call back? " 351 "Data x%px DID %x Xri: %x status %x\n", 352 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0, 353 cmdwqe->sli4_xritag, status); 354 if (ndlp) { 355 lpfc_nlp_put(ndlp); 356 cmdwqe->ndlp = NULL; 357 } 358 lpfc_sli_release_iocbq(phba, cmdwqe); 359 } 360 361 static void 362 lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, 363 struct lpfc_iocbq *rspwqe) 364 { 365 struct lpfc_vport *vport = cmdwqe->vport; 366 struct lpfc_nvme_lport *lport; 367 uint32_t status; 368 struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; 369 370 status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK; 371 372 if (vport->localport) { 373 lport = (struct lpfc_nvme_lport *)vport->localport->private; 374 if (lport) { 375 atomic_inc(&lport->fc4NvmeLsCmpls); 376 if (status) { 377 if (bf_get(lpfc_wcqe_c_xb, wcqe)) 378 atomic_inc(&lport->cmpl_ls_xb); 379 atomic_inc(&lport->cmpl_ls_err); 380 } 381 } 382 } 383 384 __lpfc_nvme_ls_req_cmp(phba, vport, cmdwqe, wcqe); 385 } 386 387 static int 388 lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, 389 struct lpfc_dmabuf *inp, 390 struct nvmefc_ls_req *pnvme_lsreq, 391 void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *, 392 struct lpfc_iocbq *), 393 struct lpfc_nodelist *ndlp, uint32_t num_entry, 394 uint32_t tmo, uint8_t retry) 395 { 396 struct lpfc_hba *phba = vport->phba; 397 union lpfc_wqe128 *wqe; 398 struct lpfc_iocbq *genwqe; 399 struct ulp_bde64 *bpl; 400 struct ulp_bde64 bde; 401 int i, rc, xmit_len, first_len; 402 403 /* Allocate buffer for command WQE */ 404 genwqe = lpfc_sli_get_iocbq(phba); 405 if (genwqe == NULL) 406 return 1; 407 408 wqe = &genwqe->wqe; 409 /* Initialize only 64 bytes */ 410 memset(wqe, 0, sizeof(union lpfc_wqe)); 411 412 genwqe->bpl_dmabuf = bmp; 413 genwqe->cmd_flag |= LPFC_IO_NVME_LS; 414 415 /* Save for completion so we can release these resources */ 416 genwqe->ndlp = lpfc_nlp_get(ndlp); 417 if (!genwqe->ndlp) { 418 dev_warn(&phba->pcidev->dev, 419 "Warning: Failed node ref, not sending LS_REQ\n"); 420 lpfc_sli_release_iocbq(phba, genwqe); 421 return 1; 422 } 423 424 genwqe->context_un.nvme_lsreq = pnvme_lsreq; 425 /* Fill in payload, bp points to frame payload */ 426 427 if (!tmo) 428 /* FC spec states we need 3 * ratov for CT requests */ 429 tmo = (3 * phba->fc_ratov); 430 431 /* For this command calculate the xmit length of the request bde. */ 432 xmit_len = 0; 433 first_len = 0; 434 bpl = (struct ulp_bde64 *)bmp->virt; 435 for (i = 0; i < num_entry; i++) { 436 bde.tus.w = bpl[i].tus.w; 437 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 438 break; 439 xmit_len += bde.tus.f.bdeSize; 440 if (i == 0) 441 first_len = xmit_len; 442 } 443 444 genwqe->num_bdes = num_entry; 445 genwqe->hba_wqidx = 0; 446 447 /* Words 0 - 2 */ 448 wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; 449 wqe->generic.bde.tus.f.bdeSize = first_len; 450 wqe->generic.bde.addrLow = bpl[0].addrLow; 451 wqe->generic.bde.addrHigh = bpl[0].addrHigh; 452 453 /* Word 3 */ 454 wqe->gen_req.request_payload_len = first_len; 455 456 /* Word 4 */ 457 458 /* Word 5 */ 459 bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0); 460 bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1); 461 bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1); 462 bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ); 463 bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME); 464 465 /* Word 6 */ 466 bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com, 467 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 468 bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag); 469 470 /* Word 7 */ 471 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, tmo); 472 bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3); 473 bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE); 474 bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI); 475 476 /* Word 8 */ 477 wqe->gen_req.wqe_com.abort_tag = genwqe->iotag; 478 479 /* Word 9 */ 480 bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag); 481 482 /* Word 10 */ 483 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); 484 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); 485 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); 486 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); 487 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); 488 489 /* Word 11 */ 490 bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 491 bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND); 492 493 494 /* Issue GEN REQ WQE for NPORT <did> */ 495 genwqe->cmd_cmpl = cmpl; 496 genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT; 497 genwqe->vport = vport; 498 genwqe->retry = retry; 499 500 lpfc_nvmeio_data(phba, "NVME LS XMIT: xri x%x iotag x%x to x%06x\n", 501 genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID); 502 503 rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe); 504 if (rc) { 505 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 506 "6045 Issue GEN REQ WQE to NPORT x%x " 507 "Data: x%x x%x rc x%x\n", 508 ndlp->nlp_DID, genwqe->iotag, 509 vport->port_state, rc); 510 lpfc_nlp_put(ndlp); 511 lpfc_sli_release_iocbq(phba, genwqe); 512 return 1; 513 } 514 515 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_ELS, 516 "6050 Issue GEN REQ WQE to NPORT x%x " 517 "Data: oxid: x%x state: x%x wq:x%px lsreq:x%px " 518 "bmp:x%px xmit:%d 1st:%d\n", 519 ndlp->nlp_DID, genwqe->sli4_xritag, 520 vport->port_state, 521 genwqe, pnvme_lsreq, bmp, xmit_len, first_len); 522 return 0; 523 } 524 525 526 /** 527 * __lpfc_nvme_ls_req - Generic service routine to issue an NVME LS request 528 * @vport: The local port issuing the LS 529 * @ndlp: The remote port to send the LS to 530 * @pnvme_lsreq: Pointer to LS request structure from the transport 531 * @gen_req_cmp: Completion call-back 532 * 533 * Routine validates the ndlp, builds buffers and sends a GEN_REQUEST 534 * WQE to perform the LS operation. 535 * 536 * Return value : 537 * 0 - Success 538 * non-zero: various error codes, in form of -Exxx 539 **/ 540 int 541 __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 542 struct nvmefc_ls_req *pnvme_lsreq, 543 void (*gen_req_cmp)(struct lpfc_hba *phba, 544 struct lpfc_iocbq *cmdwqe, 545 struct lpfc_iocbq *rspwqe)) 546 { 547 struct lpfc_dmabuf *bmp; 548 struct ulp_bde64 *bpl; 549 int ret; 550 uint16_t ntype, nstate; 551 552 if (!ndlp) { 553 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 554 "6051 NVMEx LS REQ: Bad NDLP x%px, Failing " 555 "LS Req\n", 556 ndlp); 557 return -ENODEV; 558 } 559 560 ntype = ndlp->nlp_type; 561 nstate = ndlp->nlp_state; 562 if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) || 563 (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) { 564 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 565 "6088 NVMEx LS REQ: Fail DID x%06x not " 566 "ready for IO. Type x%x, State x%x\n", 567 ndlp->nlp_DID, ntype, nstate); 568 return -ENODEV; 569 } 570 if (vport->phba->hba_flag & HBA_IOQ_FLUSH) 571 return -ENODEV; 572 573 if (!vport->phba->sli4_hba.nvmels_wq) 574 return -ENOMEM; 575 576 /* 577 * there are two dma buf in the request, actually there is one and 578 * the second one is just the start address + cmd size. 579 * Before calling lpfc_nvme_gen_req these buffers need to be wrapped 580 * in a lpfc_dmabuf struct. When freeing we just free the wrapper 581 * because the nvem layer owns the data bufs. 582 * We do not have to break these packets open, we don't care what is 583 * in them. And we do not have to look at the resonse data, we only 584 * care that we got a response. All of the caring is going to happen 585 * in the nvme-fc layer. 586 */ 587 588 bmp = kmalloc(sizeof(*bmp), GFP_KERNEL); 589 if (!bmp) { 590 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 591 "6044 NVMEx LS REQ: Could not alloc LS buf " 592 "for DID %x\n", 593 ndlp->nlp_DID); 594 return -ENOMEM; 595 } 596 597 bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys)); 598 if (!bmp->virt) { 599 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 600 "6042 NVMEx LS REQ: Could not alloc mbuf " 601 "for DID %x\n", 602 ndlp->nlp_DID); 603 kfree(bmp); 604 return -ENOMEM; 605 } 606 607 INIT_LIST_HEAD(&bmp->list); 608 609 bpl = (struct ulp_bde64 *)bmp->virt; 610 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma)); 611 bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma)); 612 bpl->tus.f.bdeFlags = 0; 613 bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen; 614 bpl->tus.w = le32_to_cpu(bpl->tus.w); 615 bpl++; 616 617 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma)); 618 bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma)); 619 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; 620 bpl->tus.f.bdeSize = pnvme_lsreq->rsplen; 621 bpl->tus.w = le32_to_cpu(bpl->tus.w); 622 623 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 624 "6149 NVMEx LS REQ: Issue to DID 0x%06x lsreq x%px, " 625 "rqstlen:%d rsplen:%d %pad %pad\n", 626 ndlp->nlp_DID, pnvme_lsreq, pnvme_lsreq->rqstlen, 627 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma, 628 &pnvme_lsreq->rspdma); 629 630 ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr, 631 pnvme_lsreq, gen_req_cmp, ndlp, 2, 632 pnvme_lsreq->timeout, 0); 633 if (ret != WQE_SUCCESS) { 634 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 635 "6052 NVMEx REQ: EXIT. issue ls wqe failed " 636 "lsreq x%px Status %x DID %x\n", 637 pnvme_lsreq, ret, ndlp->nlp_DID); 638 lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys); 639 kfree(bmp); 640 return -EIO; 641 } 642 643 return 0; 644 } 645 646 /** 647 * lpfc_nvme_ls_req - Issue an NVME Link Service request 648 * @pnvme_lport: Transport localport that LS is to be issued from. 649 * @pnvme_rport: Transport remoteport that LS is to be sent to. 650 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS 651 * 652 * Driver registers this routine to handle any link service request 653 * from the nvme_fc transport to a remote nvme-aware port. 654 * 655 * Return value : 656 * 0 - Success 657 * non-zero: various error codes, in form of -Exxx 658 **/ 659 static int 660 lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport, 661 struct nvme_fc_remote_port *pnvme_rport, 662 struct nvmefc_ls_req *pnvme_lsreq) 663 { 664 struct lpfc_nvme_lport *lport; 665 struct lpfc_nvme_rport *rport; 666 struct lpfc_vport *vport; 667 int ret; 668 669 lport = (struct lpfc_nvme_lport *)pnvme_lport->private; 670 rport = (struct lpfc_nvme_rport *)pnvme_rport->private; 671 if (unlikely(!lport) || unlikely(!rport)) 672 return -EINVAL; 673 674 vport = lport->vport; 675 if (vport->load_flag & FC_UNLOADING || 676 vport->phba->hba_flag & HBA_IOQ_FLUSH) 677 return -ENODEV; 678 679 atomic_inc(&lport->fc4NvmeLsRequests); 680 681 ret = __lpfc_nvme_ls_req(vport, rport->ndlp, pnvme_lsreq, 682 lpfc_nvme_ls_req_cmp); 683 if (ret) 684 atomic_inc(&lport->xmt_ls_err); 685 686 return ret; 687 } 688 689 /** 690 * __lpfc_nvme_ls_abort - Generic service routine to abort a prior 691 * NVME LS request 692 * @vport: The local port that issued the LS 693 * @ndlp: The remote port the LS was sent to 694 * @pnvme_lsreq: Pointer to LS request structure from the transport 695 * 696 * The driver validates the ndlp, looks for the LS, and aborts the 697 * LS if found. 698 * 699 * Returns: 700 * 0 : if LS found and aborted 701 * non-zero: various error conditions in form -Exxx 702 **/ 703 int 704 __lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 705 struct nvmefc_ls_req *pnvme_lsreq) 706 { 707 struct lpfc_hba *phba = vport->phba; 708 struct lpfc_sli_ring *pring; 709 struct lpfc_iocbq *wqe, *next_wqe; 710 bool foundit = false; 711 712 if (!ndlp) { 713 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 714 "6049 NVMEx LS REQ Abort: Bad NDLP x%px DID " 715 "x%06x, Failing LS Req\n", 716 ndlp, ndlp ? ndlp->nlp_DID : 0); 717 return -EINVAL; 718 } 719 720 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS, 721 "6040 NVMEx LS REQ Abort: Issue LS_ABORT for lsreq " 722 "x%px rqstlen:%d rsplen:%d %pad %pad\n", 723 pnvme_lsreq, pnvme_lsreq->rqstlen, 724 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma, 725 &pnvme_lsreq->rspdma); 726 727 /* 728 * Lock the ELS ring txcmplq and look for the wqe that matches 729 * this ELS. If found, issue an abort on the wqe. 730 */ 731 pring = phba->sli4_hba.nvmels_wq->pring; 732 spin_lock_irq(&phba->hbalock); 733 spin_lock(&pring->ring_lock); 734 list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) { 735 if (wqe->context_un.nvme_lsreq == pnvme_lsreq) { 736 wqe->cmd_flag |= LPFC_DRIVER_ABORTED; 737 foundit = true; 738 break; 739 } 740 } 741 spin_unlock(&pring->ring_lock); 742 743 if (foundit) 744 lpfc_sli_issue_abort_iotag(phba, pring, wqe, NULL); 745 spin_unlock_irq(&phba->hbalock); 746 747 if (foundit) 748 return 0; 749 750 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS, 751 "6213 NVMEx LS REQ Abort: Unable to locate req x%px\n", 752 pnvme_lsreq); 753 return -EINVAL; 754 } 755 756 static int 757 lpfc_nvme_xmt_ls_rsp(struct nvme_fc_local_port *localport, 758 struct nvme_fc_remote_port *remoteport, 759 struct nvmefc_ls_rsp *ls_rsp) 760 { 761 struct lpfc_async_xchg_ctx *axchg = 762 container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp); 763 struct lpfc_nvme_lport *lport; 764 int rc; 765 766 if (axchg->phba->pport->load_flag & FC_UNLOADING) 767 return -ENODEV; 768 769 lport = (struct lpfc_nvme_lport *)localport->private; 770 771 rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, __lpfc_nvme_xmt_ls_rsp_cmp); 772 773 if (rc) { 774 /* 775 * unless the failure is due to having already sent 776 * the response, an abort will be generated for the 777 * exchange if the rsp can't be sent. 778 */ 779 if (rc != -EALREADY) 780 atomic_inc(&lport->xmt_ls_abort); 781 return rc; 782 } 783 784 return 0; 785 } 786 787 /** 788 * lpfc_nvme_ls_abort - Abort a prior NVME LS request 789 * @pnvme_lport: Transport localport that LS is to be issued from. 790 * @pnvme_rport: Transport remoteport that LS is to be sent to. 791 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS 792 * 793 * Driver registers this routine to abort a NVME LS request that is 794 * in progress (from the transports perspective). 795 **/ 796 static void 797 lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport, 798 struct nvme_fc_remote_port *pnvme_rport, 799 struct nvmefc_ls_req *pnvme_lsreq) 800 { 801 struct lpfc_nvme_lport *lport; 802 struct lpfc_vport *vport; 803 struct lpfc_nodelist *ndlp; 804 int ret; 805 806 lport = (struct lpfc_nvme_lport *)pnvme_lport->private; 807 if (unlikely(!lport)) 808 return; 809 vport = lport->vport; 810 811 if (vport->load_flag & FC_UNLOADING) 812 return; 813 814 ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id); 815 816 ret = __lpfc_nvme_ls_abort(vport, ndlp, pnvme_lsreq); 817 if (!ret) 818 atomic_inc(&lport->xmt_ls_abort); 819 } 820 821 /* Fix up the existing sgls for NVME IO. */ 822 static inline void 823 lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport, 824 struct lpfc_io_buf *lpfc_ncmd, 825 struct nvmefc_fcp_req *nCmd) 826 { 827 struct lpfc_hba *phba = vport->phba; 828 struct sli4_sge *sgl; 829 union lpfc_wqe128 *wqe; 830 uint32_t *wptr, *dptr; 831 832 /* 833 * Get a local pointer to the built-in wqe and correct 834 * the cmd size to match NVME's 96 bytes and fix 835 * the dma address. 836 */ 837 838 wqe = &lpfc_ncmd->cur_iocbq.wqe; 839 840 /* 841 * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to 842 * match NVME. NVME sends 96 bytes. Also, use the 843 * nvme commands command and response dma addresses 844 * rather than the virtual memory to ease the restore 845 * operation. 846 */ 847 sgl = lpfc_ncmd->dma_sgl; 848 sgl->sge_len = cpu_to_le32(nCmd->cmdlen); 849 if (phba->cfg_nvme_embed_cmd) { 850 sgl->addr_hi = 0; 851 sgl->addr_lo = 0; 852 853 /* Word 0-2 - NVME CMND IU (embedded payload) */ 854 wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED; 855 wqe->generic.bde.tus.f.bdeSize = 56; 856 wqe->generic.bde.addrHigh = 0; 857 wqe->generic.bde.addrLow = 64; /* Word 16 */ 858 859 /* Word 10 - dbde is 0, wqes is 1 in template */ 860 861 /* 862 * Embed the payload in the last half of the WQE 863 * WQE words 16-30 get the NVME CMD IU payload 864 * 865 * WQE words 16-19 get payload Words 1-4 866 * WQE words 20-21 get payload Words 6-7 867 * WQE words 22-29 get payload Words 16-23 868 */ 869 wptr = &wqe->words[16]; /* WQE ptr */ 870 dptr = (uint32_t *)nCmd->cmdaddr; /* payload ptr */ 871 dptr++; /* Skip Word 0 in payload */ 872 873 *wptr++ = *dptr++; /* Word 1 */ 874 *wptr++ = *dptr++; /* Word 2 */ 875 *wptr++ = *dptr++; /* Word 3 */ 876 *wptr++ = *dptr++; /* Word 4 */ 877 dptr++; /* Skip Word 5 in payload */ 878 *wptr++ = *dptr++; /* Word 6 */ 879 *wptr++ = *dptr++; /* Word 7 */ 880 dptr += 8; /* Skip Words 8-15 in payload */ 881 *wptr++ = *dptr++; /* Word 16 */ 882 *wptr++ = *dptr++; /* Word 17 */ 883 *wptr++ = *dptr++; /* Word 18 */ 884 *wptr++ = *dptr++; /* Word 19 */ 885 *wptr++ = *dptr++; /* Word 20 */ 886 *wptr++ = *dptr++; /* Word 21 */ 887 *wptr++ = *dptr++; /* Word 22 */ 888 *wptr = *dptr; /* Word 23 */ 889 } else { 890 sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->cmddma)); 891 sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->cmddma)); 892 893 /* Word 0-2 - NVME CMND IU Inline BDE */ 894 wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; 895 wqe->generic.bde.tus.f.bdeSize = nCmd->cmdlen; 896 wqe->generic.bde.addrHigh = sgl->addr_hi; 897 wqe->generic.bde.addrLow = sgl->addr_lo; 898 899 /* Word 10 */ 900 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1); 901 bf_set(wqe_wqes, &wqe->generic.wqe_com, 0); 902 } 903 904 sgl++; 905 906 /* Setup the physical region for the FCP RSP */ 907 sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma)); 908 sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma)); 909 sgl->word2 = le32_to_cpu(sgl->word2); 910 if (nCmd->sg_cnt) 911 bf_set(lpfc_sli4_sge_last, sgl, 0); 912 else 913 bf_set(lpfc_sli4_sge_last, sgl, 1); 914 sgl->word2 = cpu_to_le32(sgl->word2); 915 sgl->sge_len = cpu_to_le32(nCmd->rsplen); 916 } 917 918 919 /* 920 * lpfc_nvme_io_cmd_cmpl - Complete an NVME-over-FCP IO 921 * 922 * Driver registers this routine as it io request handler. This 923 * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq 924 * data structure to the rport indicated in @lpfc_nvme_rport. 925 * 926 * Return value : 927 * 0 - Success 928 * TODO: What are the failure codes. 929 **/ 930 static void 931 lpfc_nvme_io_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, 932 struct lpfc_iocbq *pwqeOut) 933 { 934 struct lpfc_io_buf *lpfc_ncmd = pwqeIn->io_buf; 935 struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl; 936 struct lpfc_vport *vport = pwqeIn->vport; 937 struct nvmefc_fcp_req *nCmd; 938 struct nvme_fc_ersp_iu *ep; 939 struct nvme_fc_cmd_iu *cp; 940 struct lpfc_nodelist *ndlp; 941 struct lpfc_nvme_fcpreq_priv *freqpriv; 942 struct lpfc_nvme_lport *lport; 943 uint32_t code, status, idx; 944 uint16_t cid, sqhd, data; 945 uint32_t *ptr; 946 uint32_t lat; 947 bool call_done = false; 948 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 949 int cpu; 950 #endif 951 int offline = 0; 952 953 /* Sanity check on return of outstanding command */ 954 if (!lpfc_ncmd) { 955 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 956 "6071 Null lpfc_ncmd pointer. No " 957 "release, skip completion\n"); 958 return; 959 } 960 961 /* Guard against abort handler being called at same time */ 962 spin_lock(&lpfc_ncmd->buf_lock); 963 964 if (!lpfc_ncmd->nvmeCmd) { 965 spin_unlock(&lpfc_ncmd->buf_lock); 966 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 967 "6066 Missing cmpl ptrs: lpfc_ncmd x%px, " 968 "nvmeCmd x%px\n", 969 lpfc_ncmd, lpfc_ncmd->nvmeCmd); 970 971 /* Release the lpfc_ncmd regardless of the missing elements. */ 972 lpfc_release_nvme_buf(phba, lpfc_ncmd); 973 return; 974 } 975 nCmd = lpfc_ncmd->nvmeCmd; 976 status = bf_get(lpfc_wcqe_c_status, wcqe); 977 978 idx = lpfc_ncmd->cur_iocbq.hba_wqidx; 979 phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++; 980 981 if (unlikely(status && vport->localport)) { 982 lport = (struct lpfc_nvme_lport *)vport->localport->private; 983 if (lport) { 984 if (bf_get(lpfc_wcqe_c_xb, wcqe)) 985 atomic_inc(&lport->cmpl_fcp_xb); 986 atomic_inc(&lport->cmpl_fcp_err); 987 } 988 } 989 990 lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n", 991 lpfc_ncmd->cur_iocbq.sli4_xritag, 992 status, wcqe->parameter); 993 /* 994 * Catch race where our node has transitioned, but the 995 * transport is still transitioning. 996 */ 997 ndlp = lpfc_ncmd->ndlp; 998 if (!ndlp) { 999 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1000 "6062 Ignoring NVME cmpl. No ndlp\n"); 1001 goto out_err; 1002 } 1003 1004 code = bf_get(lpfc_wcqe_c_code, wcqe); 1005 if (code == CQE_CODE_NVME_ERSP) { 1006 /* For this type of CQE, we need to rebuild the rsp */ 1007 ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr; 1008 1009 /* 1010 * Get Command Id from cmd to plug into response. This 1011 * code is not needed in the next NVME Transport drop. 1012 */ 1013 cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr; 1014 cid = cp->sqe.common.command_id; 1015 1016 /* 1017 * RSN is in CQE word 2 1018 * SQHD is in CQE Word 3 bits 15:0 1019 * Cmd Specific info is in CQE Word 1 1020 * and in CQE Word 0 bits 15:0 1021 */ 1022 sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe); 1023 1024 /* Now lets build the NVME ERSP IU */ 1025 ep->iu_len = cpu_to_be16(8); 1026 ep->rsn = wcqe->parameter; 1027 ep->xfrd_len = cpu_to_be32(nCmd->payload_length); 1028 ep->rsvd12 = 0; 1029 ptr = (uint32_t *)&ep->cqe.result.u64; 1030 *ptr++ = wcqe->total_data_placed; 1031 data = bf_get(lpfc_wcqe_c_ersp0, wcqe); 1032 *ptr = (uint32_t)data; 1033 ep->cqe.sq_head = sqhd; 1034 ep->cqe.sq_id = nCmd->sqid; 1035 ep->cqe.command_id = cid; 1036 ep->cqe.status = 0; 1037 1038 lpfc_ncmd->status = IOSTAT_SUCCESS; 1039 lpfc_ncmd->result = 0; 1040 nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN; 1041 nCmd->transferred_length = nCmd->payload_length; 1042 } else { 1043 lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK); 1044 lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK); 1045 1046 /* For NVME, the only failure path that results in an 1047 * IO error is when the adapter rejects it. All other 1048 * conditions are a success case and resolved by the 1049 * transport. 1050 * IOSTAT_FCP_RSP_ERROR means: 1051 * 1. Length of data received doesn't match total 1052 * transfer length in WQE 1053 * 2. If the RSP payload does NOT match these cases: 1054 * a. RSP length 12/24 bytes and all zeros 1055 * b. NVME ERSP 1056 */ 1057 switch (lpfc_ncmd->status) { 1058 case IOSTAT_SUCCESS: 1059 nCmd->transferred_length = wcqe->total_data_placed; 1060 nCmd->rcv_rsplen = 0; 1061 nCmd->status = 0; 1062 break; 1063 case IOSTAT_FCP_RSP_ERROR: 1064 nCmd->transferred_length = wcqe->total_data_placed; 1065 nCmd->rcv_rsplen = wcqe->parameter; 1066 nCmd->status = 0; 1067 1068 /* Check if this is really an ERSP */ 1069 if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) { 1070 lpfc_ncmd->status = IOSTAT_SUCCESS; 1071 lpfc_ncmd->result = 0; 1072 1073 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, 1074 "6084 NVME Completion ERSP: " 1075 "xri %x placed x%x\n", 1076 lpfc_ncmd->cur_iocbq.sli4_xritag, 1077 wcqe->total_data_placed); 1078 break; 1079 } 1080 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1081 "6081 NVME Completion Protocol Error: " 1082 "xri %x status x%x result x%x " 1083 "placed x%x\n", 1084 lpfc_ncmd->cur_iocbq.sli4_xritag, 1085 lpfc_ncmd->status, lpfc_ncmd->result, 1086 wcqe->total_data_placed); 1087 break; 1088 case IOSTAT_LOCAL_REJECT: 1089 /* Let fall through to set command final state. */ 1090 if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED) 1091 lpfc_printf_vlog(vport, KERN_INFO, 1092 LOG_NVME_IOERR, 1093 "6032 Delay Aborted cmd x%px " 1094 "nvme cmd x%px, xri x%x, " 1095 "xb %d\n", 1096 lpfc_ncmd, nCmd, 1097 lpfc_ncmd->cur_iocbq.sli4_xritag, 1098 bf_get(lpfc_wcqe_c_xb, wcqe)); 1099 fallthrough; 1100 default: 1101 out_err: 1102 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 1103 "6072 NVME Completion Error: xri %x " 1104 "status x%x result x%x [x%x] " 1105 "placed x%x\n", 1106 lpfc_ncmd->cur_iocbq.sli4_xritag, 1107 lpfc_ncmd->status, lpfc_ncmd->result, 1108 wcqe->parameter, 1109 wcqe->total_data_placed); 1110 nCmd->transferred_length = 0; 1111 nCmd->rcv_rsplen = 0; 1112 nCmd->status = NVME_SC_INTERNAL; 1113 offline = pci_channel_offline(vport->phba->pcidev); 1114 } 1115 } 1116 1117 /* pick up SLI4 exhange busy condition */ 1118 if (bf_get(lpfc_wcqe_c_xb, wcqe) && !offline) 1119 lpfc_ncmd->flags |= LPFC_SBUF_XBUSY; 1120 else 1121 lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY; 1122 1123 /* Update stats and complete the IO. There is 1124 * no need for dma unprep because the nvme_transport 1125 * owns the dma address. 1126 */ 1127 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1128 if (lpfc_ncmd->ts_cmd_start) { 1129 lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp; 1130 lpfc_ncmd->ts_data_io = ktime_get_ns(); 1131 phba->ktime_last_cmd = lpfc_ncmd->ts_data_io; 1132 lpfc_io_ktime(phba, lpfc_ncmd); 1133 } 1134 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_NVME_IO)) { 1135 cpu = raw_smp_processor_id(); 1136 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); 1137 if (lpfc_ncmd->cpu != cpu) 1138 lpfc_printf_vlog(vport, 1139 KERN_INFO, LOG_NVME_IOERR, 1140 "6701 CPU Check cmpl: " 1141 "cpu %d expect %d\n", 1142 cpu, lpfc_ncmd->cpu); 1143 } 1144 #endif 1145 1146 /* NVME targets need completion held off until the abort exchange 1147 * completes unless the NVME Rport is getting unregistered. 1148 */ 1149 1150 if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) { 1151 freqpriv = nCmd->private; 1152 freqpriv->nvme_buf = NULL; 1153 lpfc_ncmd->nvmeCmd = NULL; 1154 call_done = true; 1155 } 1156 spin_unlock(&lpfc_ncmd->buf_lock); 1157 1158 /* Check if IO qualified for CMF */ 1159 if (phba->cmf_active_mode != LPFC_CFG_OFF && 1160 nCmd->io_dir == NVMEFC_FCP_READ && 1161 nCmd->payload_length) { 1162 /* Used when calculating average latency */ 1163 lat = ktime_get_ns() - lpfc_ncmd->rx_cmd_start; 1164 lpfc_update_cmf_cmpl(phba, lat, nCmd->payload_length, NULL); 1165 } 1166 1167 if (call_done) 1168 nCmd->done(nCmd); 1169 1170 /* Call release with XB=1 to queue the IO into the abort list. */ 1171 lpfc_release_nvme_buf(phba, lpfc_ncmd); 1172 } 1173 1174 1175 /** 1176 * lpfc_nvme_prep_io_cmd - Issue an NVME-over-FCP IO 1177 * @vport: pointer to a host virtual N_Port data structure 1178 * @lpfc_ncmd: Pointer to lpfc scsi command 1179 * @pnode: pointer to a node-list data structure 1180 * @cstat: pointer to the control status structure 1181 * 1182 * Driver registers this routine as it io request handler. This 1183 * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq 1184 * data structure to the rport indicated in @lpfc_nvme_rport. 1185 * 1186 * Return value : 1187 * 0 - Success 1188 * TODO: What are the failure codes. 1189 **/ 1190 static int 1191 lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, 1192 struct lpfc_io_buf *lpfc_ncmd, 1193 struct lpfc_nodelist *pnode, 1194 struct lpfc_fc4_ctrl_stat *cstat) 1195 { 1196 struct lpfc_hba *phba = vport->phba; 1197 struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd; 1198 struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq); 1199 union lpfc_wqe128 *wqe = &pwqeq->wqe; 1200 uint32_t req_len; 1201 1202 /* 1203 * There are three possibilities here - use scatter-gather segment, use 1204 * the single mapping, or neither. 1205 */ 1206 if (nCmd->sg_cnt) { 1207 if (nCmd->io_dir == NVMEFC_FCP_WRITE) { 1208 /* From the iwrite template, initialize words 7 - 11 */ 1209 memcpy(&wqe->words[7], 1210 &lpfc_iwrite_cmd_template.words[7], 1211 sizeof(uint32_t) * 5); 1212 1213 /* Word 4 */ 1214 wqe->fcp_iwrite.total_xfer_len = nCmd->payload_length; 1215 1216 /* Word 5 */ 1217 if ((phba->cfg_nvme_enable_fb) && 1218 (pnode->nlp_flag & NLP_FIRSTBURST)) { 1219 req_len = lpfc_ncmd->nvmeCmd->payload_length; 1220 if (req_len < pnode->nvme_fb_size) 1221 wqe->fcp_iwrite.initial_xfer_len = 1222 req_len; 1223 else 1224 wqe->fcp_iwrite.initial_xfer_len = 1225 pnode->nvme_fb_size; 1226 } else { 1227 wqe->fcp_iwrite.initial_xfer_len = 0; 1228 } 1229 cstat->output_requests++; 1230 } else { 1231 /* From the iread template, initialize words 7 - 11 */ 1232 memcpy(&wqe->words[7], 1233 &lpfc_iread_cmd_template.words[7], 1234 sizeof(uint32_t) * 5); 1235 1236 /* Word 4 */ 1237 wqe->fcp_iread.total_xfer_len = nCmd->payload_length; 1238 1239 /* Word 5 */ 1240 wqe->fcp_iread.rsrvd5 = 0; 1241 1242 /* For a CMF Managed port, iod must be zero'ed */ 1243 if (phba->cmf_active_mode == LPFC_CFG_MANAGED) 1244 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, 1245 LPFC_WQE_IOD_NONE); 1246 cstat->input_requests++; 1247 } 1248 } else { 1249 /* From the icmnd template, initialize words 4 - 11 */ 1250 memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4], 1251 sizeof(uint32_t) * 8); 1252 cstat->control_requests++; 1253 } 1254 1255 if (pnode->nlp_nvme_info & NLP_NVME_NSLER) 1256 bf_set(wqe_erp, &wqe->generic.wqe_com, 1); 1257 /* 1258 * Finish initializing those WQE fields that are independent 1259 * of the nvme_cmnd request_buffer 1260 */ 1261 1262 /* Word 3 */ 1263 bf_set(payload_offset_len, &wqe->fcp_icmd, 1264 (nCmd->rsplen + nCmd->cmdlen)); 1265 1266 /* Word 6 */ 1267 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 1268 phba->sli4_hba.rpi_ids[pnode->nlp_rpi]); 1269 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag); 1270 1271 /* Word 8 */ 1272 wqe->generic.wqe_com.abort_tag = pwqeq->iotag; 1273 1274 /* Word 9 */ 1275 bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag); 1276 1277 /* Word 10 */ 1278 bf_set(wqe_xchg, &wqe->fcp_iwrite.wqe_com, LPFC_NVME_XCHG); 1279 1280 /* Words 13 14 15 are for PBDE support */ 1281 1282 pwqeq->vport = vport; 1283 return 0; 1284 } 1285 1286 1287 /** 1288 * lpfc_nvme_prep_io_dma - Issue an NVME-over-FCP IO 1289 * @vport: pointer to a host virtual N_Port data structure 1290 * @lpfc_ncmd: Pointer to lpfc scsi command 1291 * 1292 * Driver registers this routine as it io request handler. This 1293 * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq 1294 * data structure to the rport indicated in @lpfc_nvme_rport. 1295 * 1296 * Return value : 1297 * 0 - Success 1298 * TODO: What are the failure codes. 1299 **/ 1300 static int 1301 lpfc_nvme_prep_io_dma(struct lpfc_vport *vport, 1302 struct lpfc_io_buf *lpfc_ncmd) 1303 { 1304 struct lpfc_hba *phba = vport->phba; 1305 struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd; 1306 union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe; 1307 struct sli4_sge *sgl = lpfc_ncmd->dma_sgl; 1308 struct sli4_hybrid_sgl *sgl_xtra = NULL; 1309 struct scatterlist *data_sg; 1310 struct sli4_sge *first_data_sgl; 1311 struct ulp_bde64 *bde; 1312 dma_addr_t physaddr = 0; 1313 uint32_t dma_len = 0; 1314 uint32_t dma_offset = 0; 1315 int nseg, i, j; 1316 bool lsp_just_set = false; 1317 1318 /* Fix up the command and response DMA stuff. */ 1319 lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd); 1320 1321 /* 1322 * There are three possibilities here - use scatter-gather segment, use 1323 * the single mapping, or neither. 1324 */ 1325 if (nCmd->sg_cnt) { 1326 /* 1327 * Jump over the cmd and rsp SGEs. The fix routine 1328 * has already adjusted for this. 1329 */ 1330 sgl += 2; 1331 1332 first_data_sgl = sgl; 1333 lpfc_ncmd->seg_cnt = nCmd->sg_cnt; 1334 if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) { 1335 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1336 "6058 Too many sg segments from " 1337 "NVME Transport. Max %d, " 1338 "nvmeIO sg_cnt %d\n", 1339 phba->cfg_nvme_seg_cnt + 1, 1340 lpfc_ncmd->seg_cnt); 1341 lpfc_ncmd->seg_cnt = 0; 1342 return 1; 1343 } 1344 1345 /* 1346 * The driver established a maximum scatter-gather segment count 1347 * during probe that limits the number of sg elements in any 1348 * single nvme command. Just run through the seg_cnt and format 1349 * the sge's. 1350 */ 1351 nseg = nCmd->sg_cnt; 1352 data_sg = nCmd->first_sgl; 1353 1354 /* for tracking the segment boundaries */ 1355 j = 2; 1356 for (i = 0; i < nseg; i++) { 1357 if (data_sg == NULL) { 1358 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1359 "6059 dptr err %d, nseg %d\n", 1360 i, nseg); 1361 lpfc_ncmd->seg_cnt = 0; 1362 return 1; 1363 } 1364 1365 sgl->word2 = 0; 1366 if (nseg == 1) { 1367 bf_set(lpfc_sli4_sge_last, sgl, 1); 1368 bf_set(lpfc_sli4_sge_type, sgl, 1369 LPFC_SGE_TYPE_DATA); 1370 } else { 1371 bf_set(lpfc_sli4_sge_last, sgl, 0); 1372 1373 /* expand the segment */ 1374 if (!lsp_just_set && 1375 !((j + 1) % phba->border_sge_num) && 1376 ((nseg - 1) != i)) { 1377 /* set LSP type */ 1378 bf_set(lpfc_sli4_sge_type, sgl, 1379 LPFC_SGE_TYPE_LSP); 1380 1381 sgl_xtra = lpfc_get_sgl_per_hdwq( 1382 phba, lpfc_ncmd); 1383 1384 if (unlikely(!sgl_xtra)) { 1385 lpfc_ncmd->seg_cnt = 0; 1386 return 1; 1387 } 1388 sgl->addr_lo = cpu_to_le32(putPaddrLow( 1389 sgl_xtra->dma_phys_sgl)); 1390 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 1391 sgl_xtra->dma_phys_sgl)); 1392 1393 } else { 1394 bf_set(lpfc_sli4_sge_type, sgl, 1395 LPFC_SGE_TYPE_DATA); 1396 } 1397 } 1398 1399 if (!(bf_get(lpfc_sli4_sge_type, sgl) & 1400 LPFC_SGE_TYPE_LSP)) { 1401 if ((nseg - 1) == i) 1402 bf_set(lpfc_sli4_sge_last, sgl, 1); 1403 1404 physaddr = sg_dma_address(data_sg); 1405 dma_len = sg_dma_len(data_sg); 1406 sgl->addr_lo = cpu_to_le32( 1407 putPaddrLow(physaddr)); 1408 sgl->addr_hi = cpu_to_le32( 1409 putPaddrHigh(physaddr)); 1410 1411 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 1412 sgl->word2 = cpu_to_le32(sgl->word2); 1413 sgl->sge_len = cpu_to_le32(dma_len); 1414 1415 dma_offset += dma_len; 1416 data_sg = sg_next(data_sg); 1417 1418 sgl++; 1419 1420 lsp_just_set = false; 1421 } else { 1422 sgl->word2 = cpu_to_le32(sgl->word2); 1423 1424 sgl->sge_len = cpu_to_le32( 1425 phba->cfg_sg_dma_buf_size); 1426 1427 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 1428 i = i - 1; 1429 1430 lsp_just_set = true; 1431 } 1432 1433 j++; 1434 } 1435 1436 /* PBDE support for first data SGE only */ 1437 if (nseg == 1 && phba->cfg_enable_pbde) { 1438 /* Words 13-15 */ 1439 bde = (struct ulp_bde64 *) 1440 &wqe->words[13]; 1441 bde->addrLow = first_data_sgl->addr_lo; 1442 bde->addrHigh = first_data_sgl->addr_hi; 1443 bde->tus.f.bdeSize = 1444 le32_to_cpu(first_data_sgl->sge_len); 1445 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1446 bde->tus.w = cpu_to_le32(bde->tus.w); 1447 1448 /* Word 11 - set PBDE bit */ 1449 bf_set(wqe_pbde, &wqe->generic.wqe_com, 1); 1450 } else { 1451 memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3)); 1452 /* Word 11 - PBDE bit disabled by default template */ 1453 } 1454 1455 } else { 1456 lpfc_ncmd->seg_cnt = 0; 1457 1458 /* For this clause to be valid, the payload_length 1459 * and sg_cnt must zero. 1460 */ 1461 if (nCmd->payload_length != 0) { 1462 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1463 "6063 NVME DMA Prep Err: sg_cnt %d " 1464 "payload_length x%x\n", 1465 nCmd->sg_cnt, nCmd->payload_length); 1466 return 1; 1467 } 1468 } 1469 return 0; 1470 } 1471 1472 /** 1473 * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO 1474 * @pnvme_lport: Pointer to the driver's local port data 1475 * @pnvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq 1476 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue 1477 * @pnvme_fcreq: IO request from nvme fc to driver. 1478 * 1479 * Driver registers this routine as it io request handler. This 1480 * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq 1481 * data structure to the rport indicated in @lpfc_nvme_rport. 1482 * 1483 * Return value : 1484 * 0 - Success 1485 * TODO: What are the failure codes. 1486 **/ 1487 static int 1488 lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, 1489 struct nvme_fc_remote_port *pnvme_rport, 1490 void *hw_queue_handle, 1491 struct nvmefc_fcp_req *pnvme_fcreq) 1492 { 1493 int ret = 0; 1494 int expedite = 0; 1495 int idx, cpu; 1496 struct lpfc_nvme_lport *lport; 1497 struct lpfc_fc4_ctrl_stat *cstat; 1498 struct lpfc_vport *vport; 1499 struct lpfc_hba *phba; 1500 struct lpfc_nodelist *ndlp; 1501 struct lpfc_io_buf *lpfc_ncmd; 1502 struct lpfc_nvme_rport *rport; 1503 struct lpfc_nvme_qhandle *lpfc_queue_info; 1504 struct lpfc_nvme_fcpreq_priv *freqpriv; 1505 struct nvme_common_command *sqe; 1506 uint64_t start = 0; 1507 1508 /* Validate pointers. LLDD fault handling with transport does 1509 * have timing races. 1510 */ 1511 lport = (struct lpfc_nvme_lport *)pnvme_lport->private; 1512 if (unlikely(!lport)) { 1513 ret = -EINVAL; 1514 goto out_fail; 1515 } 1516 1517 vport = lport->vport; 1518 1519 if (unlikely(!hw_queue_handle)) { 1520 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 1521 "6117 Fail IO, NULL hw_queue_handle\n"); 1522 atomic_inc(&lport->xmt_fcp_err); 1523 ret = -EBUSY; 1524 goto out_fail; 1525 } 1526 1527 phba = vport->phba; 1528 1529 if ((unlikely(vport->load_flag & FC_UNLOADING)) || 1530 phba->hba_flag & HBA_IOQ_FLUSH) { 1531 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 1532 "6124 Fail IO, Driver unload\n"); 1533 atomic_inc(&lport->xmt_fcp_err); 1534 ret = -ENODEV; 1535 goto out_fail; 1536 } 1537 1538 freqpriv = pnvme_fcreq->private; 1539 if (unlikely(!freqpriv)) { 1540 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 1541 "6158 Fail IO, NULL request data\n"); 1542 atomic_inc(&lport->xmt_fcp_err); 1543 ret = -EINVAL; 1544 goto out_fail; 1545 } 1546 1547 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1548 if (phba->ktime_on) 1549 start = ktime_get_ns(); 1550 #endif 1551 rport = (struct lpfc_nvme_rport *)pnvme_rport->private; 1552 lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle; 1553 1554 /* 1555 * Catch race where our node has transitioned, but the 1556 * transport is still transitioning. 1557 */ 1558 ndlp = rport->ndlp; 1559 if (!ndlp) { 1560 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR, 1561 "6053 Busy IO, ndlp not ready: rport x%px " 1562 "ndlp x%px, DID x%06x\n", 1563 rport, ndlp, pnvme_rport->port_id); 1564 atomic_inc(&lport->xmt_fcp_err); 1565 ret = -EBUSY; 1566 goto out_fail; 1567 } 1568 1569 /* The remote node has to be a mapped target or it's an error. */ 1570 if ((ndlp->nlp_type & NLP_NVME_TARGET) && 1571 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 1572 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR, 1573 "6036 Fail IO, DID x%06x not ready for " 1574 "IO. State x%x, Type x%x Flg x%x\n", 1575 pnvme_rport->port_id, 1576 ndlp->nlp_state, ndlp->nlp_type, 1577 ndlp->fc4_xpt_flags); 1578 atomic_inc(&lport->xmt_fcp_bad_ndlp); 1579 ret = -EBUSY; 1580 goto out_fail; 1581 1582 } 1583 1584 /* Currently only NVME Keep alive commands should be expedited 1585 * if the driver runs out of a resource. These should only be 1586 * issued on the admin queue, qidx 0 1587 */ 1588 if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) { 1589 sqe = &((struct nvme_fc_cmd_iu *) 1590 pnvme_fcreq->cmdaddr)->sqe.common; 1591 if (sqe->opcode == nvme_admin_keep_alive) 1592 expedite = 1; 1593 } 1594 1595 /* Check if IO qualifies for CMF */ 1596 if (phba->cmf_active_mode != LPFC_CFG_OFF && 1597 pnvme_fcreq->io_dir == NVMEFC_FCP_READ && 1598 pnvme_fcreq->payload_length) { 1599 ret = lpfc_update_cmf_cmd(phba, pnvme_fcreq->payload_length); 1600 if (ret) { 1601 ret = -EBUSY; 1602 goto out_fail; 1603 } 1604 /* Get start time for IO latency */ 1605 start = ktime_get_ns(); 1606 } 1607 1608 /* The node is shared with FCP IO, make sure the IO pending count does 1609 * not exceed the programmed depth. 1610 */ 1611 if (lpfc_ndlp_check_qdepth(phba, ndlp)) { 1612 if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) && 1613 !expedite) { 1614 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 1615 "6174 Fail IO, ndlp qdepth exceeded: " 1616 "idx %d DID %x pend %d qdepth %d\n", 1617 lpfc_queue_info->index, ndlp->nlp_DID, 1618 atomic_read(&ndlp->cmd_pending), 1619 ndlp->cmd_qdepth); 1620 atomic_inc(&lport->xmt_fcp_qdepth); 1621 ret = -EBUSY; 1622 goto out_fail1; 1623 } 1624 } 1625 1626 /* Lookup Hardware Queue index based on fcp_io_sched module parameter */ 1627 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) { 1628 idx = lpfc_queue_info->index; 1629 } else { 1630 cpu = raw_smp_processor_id(); 1631 idx = phba->sli4_hba.cpu_map[cpu].hdwq; 1632 } 1633 1634 lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite); 1635 if (lpfc_ncmd == NULL) { 1636 atomic_inc(&lport->xmt_fcp_noxri); 1637 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 1638 "6065 Fail IO, driver buffer pool is empty: " 1639 "idx %d DID %x\n", 1640 lpfc_queue_info->index, ndlp->nlp_DID); 1641 ret = -EBUSY; 1642 goto out_fail1; 1643 } 1644 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1645 if (start) { 1646 lpfc_ncmd->ts_cmd_start = start; 1647 lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd; 1648 } else { 1649 lpfc_ncmd->ts_cmd_start = 0; 1650 } 1651 #endif 1652 lpfc_ncmd->rx_cmd_start = start; 1653 1654 /* 1655 * Store the data needed by the driver to issue, abort, and complete 1656 * an IO. 1657 * Do not let the IO hang out forever. There is no midlayer issuing 1658 * an abort so inform the FW of the maximum IO pending time. 1659 */ 1660 freqpriv->nvme_buf = lpfc_ncmd; 1661 lpfc_ncmd->nvmeCmd = pnvme_fcreq; 1662 lpfc_ncmd->ndlp = ndlp; 1663 lpfc_ncmd->qidx = lpfc_queue_info->qidx; 1664 1665 /* 1666 * Issue the IO on the WQ indicated by index in the hw_queue_handle. 1667 * This identfier was create in our hardware queue create callback 1668 * routine. The driver now is dependent on the IO queue steering from 1669 * the transport. We are trusting the upper NVME layers know which 1670 * index to use and that they have affinitized a CPU to this hardware 1671 * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ. 1672 */ 1673 lpfc_ncmd->cur_iocbq.hba_wqidx = idx; 1674 cstat = &phba->sli4_hba.hdwq[idx].nvme_cstat; 1675 1676 lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat); 1677 ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd); 1678 if (ret) { 1679 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 1680 "6175 Fail IO, Prep DMA: " 1681 "idx %d DID %x\n", 1682 lpfc_queue_info->index, ndlp->nlp_DID); 1683 atomic_inc(&lport->xmt_fcp_err); 1684 ret = -ENOMEM; 1685 goto out_free_nvme_buf; 1686 } 1687 1688 lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n", 1689 lpfc_ncmd->cur_iocbq.sli4_xritag, 1690 lpfc_queue_info->index, ndlp->nlp_DID); 1691 1692 ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq); 1693 if (ret) { 1694 atomic_inc(&lport->xmt_fcp_wqerr); 1695 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 1696 "6113 Fail IO, Could not issue WQE err %x " 1697 "sid: x%x did: x%x oxid: x%x\n", 1698 ret, vport->fc_myDID, ndlp->nlp_DID, 1699 lpfc_ncmd->cur_iocbq.sli4_xritag); 1700 goto out_free_nvme_buf; 1701 } 1702 1703 if (phba->cfg_xri_rebalancing) 1704 lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_ncmd->hdwq_no); 1705 1706 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1707 if (lpfc_ncmd->ts_cmd_start) 1708 lpfc_ncmd->ts_cmd_wqput = ktime_get_ns(); 1709 1710 if (phba->hdwqstat_on & LPFC_CHECK_NVME_IO) { 1711 cpu = raw_smp_processor_id(); 1712 this_cpu_inc(phba->sli4_hba.c_stat->xmt_io); 1713 lpfc_ncmd->cpu = cpu; 1714 if (idx != cpu) 1715 lpfc_printf_vlog(vport, 1716 KERN_INFO, LOG_NVME_IOERR, 1717 "6702 CPU Check cmd: " 1718 "cpu %d wq %d\n", 1719 lpfc_ncmd->cpu, 1720 lpfc_queue_info->index); 1721 } 1722 #endif 1723 return 0; 1724 1725 out_free_nvme_buf: 1726 if (lpfc_ncmd->nvmeCmd->sg_cnt) { 1727 if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE) 1728 cstat->output_requests--; 1729 else 1730 cstat->input_requests--; 1731 } else 1732 cstat->control_requests--; 1733 lpfc_release_nvme_buf(phba, lpfc_ncmd); 1734 out_fail1: 1735 lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, 1736 pnvme_fcreq->payload_length, NULL); 1737 out_fail: 1738 return ret; 1739 } 1740 1741 /** 1742 * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request. 1743 * @phba: Pointer to HBA context object 1744 * @cmdiocb: Pointer to command iocb object. 1745 * @abts_cmpl: Pointer to wcqe complete object. 1746 * 1747 * This is the callback function for any NVME FCP IO that was aborted. 1748 * 1749 * Return value: 1750 * None 1751 **/ 1752 void 1753 lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1754 struct lpfc_wcqe_complete *abts_cmpl) 1755 { 1756 lpfc_printf_log(phba, KERN_INFO, LOG_NVME, 1757 "6145 ABORT_XRI_CN completing on rpi x%x " 1758 "original iotag x%x, abort cmd iotag x%x " 1759 "req_tag x%x, status x%x, hwstatus x%x\n", 1760 bf_get(wqe_ctxt_tag, &cmdiocb->wqe.generic.wqe_com), 1761 get_job_abtsiotag(phba, cmdiocb), cmdiocb->iotag, 1762 bf_get(lpfc_wcqe_c_request_tag, abts_cmpl), 1763 bf_get(lpfc_wcqe_c_status, abts_cmpl), 1764 bf_get(lpfc_wcqe_c_hw_status, abts_cmpl)); 1765 lpfc_sli_release_iocbq(phba, cmdiocb); 1766 } 1767 1768 /** 1769 * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS 1770 * @pnvme_lport: Pointer to the driver's local port data 1771 * @pnvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq 1772 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue 1773 * @pnvme_fcreq: IO request from nvme fc to driver. 1774 * 1775 * Driver registers this routine as its nvme request io abort handler. This 1776 * routine issues an fcp Abort WQE with data from the @lpfc_nvme_fcpreq 1777 * data structure to the rport indicated in @lpfc_nvme_rport. This routine 1778 * is executed asynchronously - one the target is validated as "MAPPED" and 1779 * ready for IO, the driver issues the abort request and returns. 1780 * 1781 * Return value: 1782 * None 1783 **/ 1784 static void 1785 lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, 1786 struct nvme_fc_remote_port *pnvme_rport, 1787 void *hw_queue_handle, 1788 struct nvmefc_fcp_req *pnvme_fcreq) 1789 { 1790 struct lpfc_nvme_lport *lport; 1791 struct lpfc_vport *vport; 1792 struct lpfc_hba *phba; 1793 struct lpfc_io_buf *lpfc_nbuf; 1794 struct lpfc_iocbq *nvmereq_wqe; 1795 struct lpfc_nvme_fcpreq_priv *freqpriv; 1796 unsigned long flags; 1797 int ret_val; 1798 1799 /* Validate pointers. LLDD fault handling with transport does 1800 * have timing races. 1801 */ 1802 lport = (struct lpfc_nvme_lport *)pnvme_lport->private; 1803 if (unlikely(!lport)) 1804 return; 1805 1806 vport = lport->vport; 1807 1808 if (unlikely(!hw_queue_handle)) { 1809 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, 1810 "6129 Fail Abort, HW Queue Handle NULL.\n"); 1811 return; 1812 } 1813 1814 phba = vport->phba; 1815 freqpriv = pnvme_fcreq->private; 1816 1817 if (unlikely(!freqpriv)) 1818 return; 1819 if (vport->load_flag & FC_UNLOADING) 1820 return; 1821 1822 /* Announce entry to new IO submit field. */ 1823 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, 1824 "6002 Abort Request to rport DID x%06x " 1825 "for nvme_fc_req x%px\n", 1826 pnvme_rport->port_id, 1827 pnvme_fcreq); 1828 1829 /* If the hba is getting reset, this flag is set. It is 1830 * cleared when the reset is complete and rings reestablished. 1831 */ 1832 spin_lock_irqsave(&phba->hbalock, flags); 1833 /* driver queued commands are in process of being flushed */ 1834 if (phba->hba_flag & HBA_IOQ_FLUSH) { 1835 spin_unlock_irqrestore(&phba->hbalock, flags); 1836 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1837 "6139 Driver in reset cleanup - flushing " 1838 "NVME Req now. hba_flag x%x\n", 1839 phba->hba_flag); 1840 return; 1841 } 1842 1843 lpfc_nbuf = freqpriv->nvme_buf; 1844 if (!lpfc_nbuf) { 1845 spin_unlock_irqrestore(&phba->hbalock, flags); 1846 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1847 "6140 NVME IO req has no matching lpfc nvme " 1848 "io buffer. Skipping abort req.\n"); 1849 return; 1850 } else if (!lpfc_nbuf->nvmeCmd) { 1851 spin_unlock_irqrestore(&phba->hbalock, flags); 1852 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1853 "6141 lpfc NVME IO req has no nvme_fcreq " 1854 "io buffer. Skipping abort req.\n"); 1855 return; 1856 } 1857 nvmereq_wqe = &lpfc_nbuf->cur_iocbq; 1858 1859 /* Guard against IO completion being called at same time */ 1860 spin_lock(&lpfc_nbuf->buf_lock); 1861 1862 /* 1863 * The lpfc_nbuf and the mapped nvme_fcreq in the driver's 1864 * state must match the nvme_fcreq passed by the nvme 1865 * transport. If they don't match, it is likely the driver 1866 * has already completed the NVME IO and the nvme transport 1867 * has not seen it yet. 1868 */ 1869 if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) { 1870 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1871 "6143 NVME req mismatch: " 1872 "lpfc_nbuf x%px nvmeCmd x%px, " 1873 "pnvme_fcreq x%px. Skipping Abort xri x%x\n", 1874 lpfc_nbuf, lpfc_nbuf->nvmeCmd, 1875 pnvme_fcreq, nvmereq_wqe->sli4_xritag); 1876 goto out_unlock; 1877 } 1878 1879 /* Don't abort IOs no longer on the pending queue. */ 1880 if (!(nvmereq_wqe->cmd_flag & LPFC_IO_ON_TXCMPLQ)) { 1881 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1882 "6142 NVME IO req x%px not queued - skipping " 1883 "abort req xri x%x\n", 1884 pnvme_fcreq, nvmereq_wqe->sli4_xritag); 1885 goto out_unlock; 1886 } 1887 1888 atomic_inc(&lport->xmt_fcp_abort); 1889 lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n", 1890 nvmereq_wqe->sli4_xritag, 1891 nvmereq_wqe->hba_wqidx, pnvme_rport->port_id); 1892 1893 /* Outstanding abort is in progress */ 1894 if (nvmereq_wqe->cmd_flag & LPFC_DRIVER_ABORTED) { 1895 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1896 "6144 Outstanding NVME I/O Abort Request " 1897 "still pending on nvme_fcreq x%px, " 1898 "lpfc_ncmd x%px xri x%x\n", 1899 pnvme_fcreq, lpfc_nbuf, 1900 nvmereq_wqe->sli4_xritag); 1901 goto out_unlock; 1902 } 1903 1904 ret_val = lpfc_sli4_issue_abort_iotag(phba, nvmereq_wqe, 1905 lpfc_nvme_abort_fcreq_cmpl); 1906 1907 spin_unlock(&lpfc_nbuf->buf_lock); 1908 spin_unlock_irqrestore(&phba->hbalock, flags); 1909 1910 /* Make sure HBA is alive */ 1911 lpfc_issue_hb_tmo(phba); 1912 1913 if (ret_val != WQE_SUCCESS) { 1914 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1915 "6137 Failed abts issue_wqe with status x%x " 1916 "for nvme_fcreq x%px.\n", 1917 ret_val, pnvme_fcreq); 1918 return; 1919 } 1920 1921 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, 1922 "6138 Transport Abort NVME Request Issued for " 1923 "ox_id x%x\n", 1924 nvmereq_wqe->sli4_xritag); 1925 return; 1926 1927 out_unlock: 1928 spin_unlock(&lpfc_nbuf->buf_lock); 1929 spin_unlock_irqrestore(&phba->hbalock, flags); 1930 return; 1931 } 1932 1933 /* Declare and initialization an instance of the FC NVME template. */ 1934 static struct nvme_fc_port_template lpfc_nvme_template = { 1935 /* initiator-based functions */ 1936 .localport_delete = lpfc_nvme_localport_delete, 1937 .remoteport_delete = lpfc_nvme_remoteport_delete, 1938 .create_queue = lpfc_nvme_create_queue, 1939 .delete_queue = lpfc_nvme_delete_queue, 1940 .ls_req = lpfc_nvme_ls_req, 1941 .fcp_io = lpfc_nvme_fcp_io_submit, 1942 .ls_abort = lpfc_nvme_ls_abort, 1943 .fcp_abort = lpfc_nvme_fcp_abort, 1944 .xmt_ls_rsp = lpfc_nvme_xmt_ls_rsp, 1945 1946 .max_hw_queues = 1, 1947 .max_sgl_segments = LPFC_NVME_DEFAULT_SEGS, 1948 .max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS, 1949 .dma_boundary = 0xFFFFFFFF, 1950 1951 /* Sizes of additional private data for data structures. 1952 * No use for the last two sizes at this time. 1953 */ 1954 .local_priv_sz = sizeof(struct lpfc_nvme_lport), 1955 .remote_priv_sz = sizeof(struct lpfc_nvme_rport), 1956 .lsrqst_priv_sz = 0, 1957 .fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv), 1958 }; 1959 1960 /* 1961 * lpfc_get_nvme_buf - Get a nvme buffer from io_buf_list of the HBA 1962 * 1963 * This routine removes a nvme buffer from head of @hdwq io_buf_list 1964 * and returns to caller. 1965 * 1966 * Return codes: 1967 * NULL - Error 1968 * Pointer to lpfc_nvme_buf - Success 1969 **/ 1970 static struct lpfc_io_buf * 1971 lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 1972 int idx, int expedite) 1973 { 1974 struct lpfc_io_buf *lpfc_ncmd; 1975 struct lpfc_sli4_hdw_queue *qp; 1976 struct sli4_sge *sgl; 1977 struct lpfc_iocbq *pwqeq; 1978 union lpfc_wqe128 *wqe; 1979 1980 lpfc_ncmd = lpfc_get_io_buf(phba, NULL, idx, expedite); 1981 1982 if (lpfc_ncmd) { 1983 pwqeq = &(lpfc_ncmd->cur_iocbq); 1984 wqe = &pwqeq->wqe; 1985 1986 /* Setup key fields in buffer that may have been changed 1987 * if other protocols used this buffer. 1988 */ 1989 pwqeq->cmd_flag = LPFC_IO_NVME; 1990 pwqeq->cmd_cmpl = lpfc_nvme_io_cmd_cmpl; 1991 lpfc_ncmd->start_time = jiffies; 1992 lpfc_ncmd->flags = 0; 1993 1994 /* Rsp SGE will be filled in when we rcv an IO 1995 * from the NVME Layer to be sent. 1996 * The cmd is going to be embedded so we need a SKIP SGE. 1997 */ 1998 sgl = lpfc_ncmd->dma_sgl; 1999 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); 2000 bf_set(lpfc_sli4_sge_last, sgl, 0); 2001 sgl->word2 = cpu_to_le32(sgl->word2); 2002 /* Fill in word 3 / sgl_len during cmd submission */ 2003 2004 /* Initialize 64 bytes only */ 2005 memset(wqe, 0, sizeof(union lpfc_wqe)); 2006 2007 if (lpfc_ndlp_check_qdepth(phba, ndlp)) { 2008 atomic_inc(&ndlp->cmd_pending); 2009 lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH; 2010 } 2011 2012 } else { 2013 qp = &phba->sli4_hba.hdwq[idx]; 2014 qp->empty_io_bufs++; 2015 } 2016 2017 return lpfc_ncmd; 2018 } 2019 2020 /** 2021 * lpfc_release_nvme_buf: Return a nvme buffer back to hba nvme buf list. 2022 * @phba: The Hba for which this call is being executed. 2023 * @lpfc_ncmd: The nvme buffer which is being released. 2024 * 2025 * This routine releases @lpfc_ncmd nvme buffer by adding it to tail of @phba 2026 * lpfc_io_buf_list list. For SLI4 XRI's are tied to the nvme buffer 2027 * and cannot be reused for at least RA_TOV amount of time if it was 2028 * aborted. 2029 **/ 2030 static void 2031 lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd) 2032 { 2033 struct lpfc_sli4_hdw_queue *qp; 2034 unsigned long iflag = 0; 2035 2036 if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp) 2037 atomic_dec(&lpfc_ncmd->ndlp->cmd_pending); 2038 2039 lpfc_ncmd->ndlp = NULL; 2040 lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH; 2041 2042 qp = lpfc_ncmd->hdwq; 2043 if (unlikely(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) { 2044 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2045 "6310 XB release deferred for " 2046 "ox_id x%x on reqtag x%x\n", 2047 lpfc_ncmd->cur_iocbq.sli4_xritag, 2048 lpfc_ncmd->cur_iocbq.iotag); 2049 2050 spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag); 2051 list_add_tail(&lpfc_ncmd->list, 2052 &qp->lpfc_abts_io_buf_list); 2053 qp->abts_nvme_io_bufs++; 2054 spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag); 2055 } else 2056 lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp); 2057 } 2058 2059 /** 2060 * lpfc_nvme_create_localport - Create/Bind an nvme localport instance. 2061 * @vport: the lpfc_vport instance requesting a localport. 2062 * 2063 * This routine is invoked to create an nvme localport instance to bind 2064 * to the nvme_fc_transport. It is called once during driver load 2065 * like lpfc_create_shost after all other services are initialized. 2066 * It requires a vport, vpi, and wwns at call time. Other localport 2067 * parameters are modified as the driver's FCID and the Fabric WWN 2068 * are established. 2069 * 2070 * Return codes 2071 * 0 - successful 2072 * -ENOMEM - no heap memory available 2073 * other values - from nvme registration upcall 2074 **/ 2075 int 2076 lpfc_nvme_create_localport(struct lpfc_vport *vport) 2077 { 2078 int ret = 0; 2079 struct lpfc_hba *phba = vport->phba; 2080 struct nvme_fc_port_info nfcp_info; 2081 struct nvme_fc_local_port *localport; 2082 struct lpfc_nvme_lport *lport; 2083 2084 /* Initialize this localport instance. The vport wwn usage ensures 2085 * that NPIV is accounted for. 2086 */ 2087 memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info)); 2088 nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR; 2089 nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn); 2090 nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn); 2091 2092 /* We need to tell the transport layer + 1 because it takes page 2093 * alignment into account. When space for the SGL is allocated we 2094 * allocate + 3, one for cmd, one for rsp and one for this alignment 2095 */ 2096 lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; 2097 2098 /* Advertise how many hw queues we support based on cfg_hdw_queue, 2099 * which will not exceed cpu count. 2100 */ 2101 lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue; 2102 2103 if (!IS_ENABLED(CONFIG_NVME_FC)) 2104 return ret; 2105 2106 /* localport is allocated from the stack, but the registration 2107 * call allocates heap memory as well as the private area. 2108 */ 2109 2110 ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template, 2111 &vport->phba->pcidev->dev, &localport); 2112 if (!ret) { 2113 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC, 2114 "6005 Successfully registered local " 2115 "NVME port num %d, localP x%px, private " 2116 "x%px, sg_seg %d\n", 2117 localport->port_num, localport, 2118 localport->private, 2119 lpfc_nvme_template.max_sgl_segments); 2120 2121 /* Private is our lport size declared in the template. */ 2122 lport = (struct lpfc_nvme_lport *)localport->private; 2123 vport->localport = localport; 2124 lport->vport = vport; 2125 vport->nvmei_support = 1; 2126 2127 atomic_set(&lport->xmt_fcp_noxri, 0); 2128 atomic_set(&lport->xmt_fcp_bad_ndlp, 0); 2129 atomic_set(&lport->xmt_fcp_qdepth, 0); 2130 atomic_set(&lport->xmt_fcp_err, 0); 2131 atomic_set(&lport->xmt_fcp_wqerr, 0); 2132 atomic_set(&lport->xmt_fcp_abort, 0); 2133 atomic_set(&lport->xmt_ls_abort, 0); 2134 atomic_set(&lport->xmt_ls_err, 0); 2135 atomic_set(&lport->cmpl_fcp_xb, 0); 2136 atomic_set(&lport->cmpl_fcp_err, 0); 2137 atomic_set(&lport->cmpl_ls_xb, 0); 2138 atomic_set(&lport->cmpl_ls_err, 0); 2139 2140 atomic_set(&lport->fc4NvmeLsRequests, 0); 2141 atomic_set(&lport->fc4NvmeLsCmpls, 0); 2142 } 2143 2144 return ret; 2145 } 2146 2147 #if (IS_ENABLED(CONFIG_NVME_FC)) 2148 /* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg. 2149 * 2150 * The driver has to wait for the host nvme transport to callback 2151 * indicating the localport has successfully unregistered all 2152 * resources. Since this is an uninterruptible wait, loop every ten 2153 * seconds and print a message indicating no progress. 2154 * 2155 * An uninterruptible wait is used because of the risk of transport-to- 2156 * driver state mismatch. 2157 */ 2158 static void 2159 lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, 2160 struct lpfc_nvme_lport *lport, 2161 struct completion *lport_unreg_cmp) 2162 { 2163 u32 wait_tmo; 2164 int ret, i, pending = 0; 2165 struct lpfc_sli_ring *pring; 2166 struct lpfc_hba *phba = vport->phba; 2167 struct lpfc_sli4_hdw_queue *qp; 2168 int abts_scsi, abts_nvme; 2169 2170 /* Host transport has to clean up and confirm requiring an indefinite 2171 * wait. Print a message if a 10 second wait expires and renew the 2172 * wait. This is unexpected. 2173 */ 2174 wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000); 2175 while (true) { 2176 ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo); 2177 if (unlikely(!ret)) { 2178 pending = 0; 2179 abts_scsi = 0; 2180 abts_nvme = 0; 2181 for (i = 0; i < phba->cfg_hdw_queue; i++) { 2182 qp = &phba->sli4_hba.hdwq[i]; 2183 if (!vport->localport || !qp || !qp->io_wq) 2184 return; 2185 2186 pring = qp->io_wq->pring; 2187 if (!pring) 2188 continue; 2189 pending += pring->txcmplq_cnt; 2190 abts_scsi += qp->abts_scsi_io_bufs; 2191 abts_nvme += qp->abts_nvme_io_bufs; 2192 } 2193 if (!vport->localport || 2194 test_bit(HBA_PCI_ERR, &vport->phba->bit_flags) || 2195 vport->load_flag & FC_UNLOADING) 2196 return; 2197 2198 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2199 "6176 Lport x%px Localport x%px wait " 2200 "timed out. Pending %d [%d:%d]. " 2201 "Renewing.\n", 2202 lport, vport->localport, pending, 2203 abts_scsi, abts_nvme); 2204 continue; 2205 } 2206 break; 2207 } 2208 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 2209 "6177 Lport x%px Localport x%px Complete Success\n", 2210 lport, vport->localport); 2211 } 2212 #endif 2213 2214 /** 2215 * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport. 2216 * @vport: pointer to a host virtual N_Port data structure 2217 * 2218 * This routine is invoked to destroy all lports bound to the phba. 2219 * The lport memory was allocated by the nvme fc transport and is 2220 * released there. This routine ensures all rports bound to the 2221 * lport have been disconnected. 2222 * 2223 **/ 2224 void 2225 lpfc_nvme_destroy_localport(struct lpfc_vport *vport) 2226 { 2227 #if (IS_ENABLED(CONFIG_NVME_FC)) 2228 struct nvme_fc_local_port *localport; 2229 struct lpfc_nvme_lport *lport; 2230 int ret; 2231 DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp); 2232 2233 if (vport->nvmei_support == 0) 2234 return; 2235 2236 localport = vport->localport; 2237 if (!localport) 2238 return; 2239 lport = (struct lpfc_nvme_lport *)localport->private; 2240 2241 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, 2242 "6011 Destroying NVME localport x%px\n", 2243 localport); 2244 2245 /* lport's rport list is clear. Unregister 2246 * lport and release resources. 2247 */ 2248 lport->lport_unreg_cmp = &lport_unreg_cmp; 2249 ret = nvme_fc_unregister_localport(localport); 2250 2251 /* Wait for completion. This either blocks 2252 * indefinitely or succeeds 2253 */ 2254 lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp); 2255 vport->localport = NULL; 2256 2257 /* Regardless of the unregister upcall response, clear 2258 * nvmei_support. All rports are unregistered and the 2259 * driver will clean up. 2260 */ 2261 vport->nvmei_support = 0; 2262 if (ret == 0) { 2263 lpfc_printf_vlog(vport, 2264 KERN_INFO, LOG_NVME_DISC, 2265 "6009 Unregistered lport Success\n"); 2266 } else { 2267 lpfc_printf_vlog(vport, 2268 KERN_INFO, LOG_NVME_DISC, 2269 "6010 Unregistered lport " 2270 "Failed, status x%x\n", 2271 ret); 2272 } 2273 #endif 2274 } 2275 2276 void 2277 lpfc_nvme_update_localport(struct lpfc_vport *vport) 2278 { 2279 #if (IS_ENABLED(CONFIG_NVME_FC)) 2280 struct nvme_fc_local_port *localport; 2281 struct lpfc_nvme_lport *lport; 2282 2283 localport = vport->localport; 2284 if (!localport) { 2285 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME, 2286 "6710 Update NVME fail. No localport\n"); 2287 return; 2288 } 2289 lport = (struct lpfc_nvme_lport *)localport->private; 2290 if (!lport) { 2291 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME, 2292 "6171 Update NVME fail. localP x%px, No lport\n", 2293 localport); 2294 return; 2295 } 2296 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, 2297 "6012 Update NVME lport x%px did x%x\n", 2298 localport, vport->fc_myDID); 2299 2300 localport->port_id = vport->fc_myDID; 2301 if (localport->port_id == 0) 2302 localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY; 2303 else 2304 localport->port_role = FC_PORT_ROLE_NVME_INITIATOR; 2305 2306 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 2307 "6030 bound lport x%px to DID x%06x\n", 2308 lport, localport->port_id); 2309 #endif 2310 } 2311 2312 int 2313 lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 2314 { 2315 #if (IS_ENABLED(CONFIG_NVME_FC)) 2316 int ret = 0; 2317 struct nvme_fc_local_port *localport; 2318 struct lpfc_nvme_lport *lport; 2319 struct lpfc_nvme_rport *rport; 2320 struct lpfc_nvme_rport *oldrport; 2321 struct nvme_fc_remote_port *remote_port; 2322 struct nvme_fc_port_info rpinfo; 2323 struct lpfc_nodelist *prev_ndlp = NULL; 2324 struct fc_rport *srport = ndlp->rport; 2325 2326 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC, 2327 "6006 Register NVME PORT. DID x%06x nlptype x%x\n", 2328 ndlp->nlp_DID, ndlp->nlp_type); 2329 2330 localport = vport->localport; 2331 if (!localport) 2332 return 0; 2333 2334 lport = (struct lpfc_nvme_lport *)localport->private; 2335 2336 /* NVME rports are not preserved across devloss. 2337 * Just register this instance. Note, rpinfo->dev_loss_tmo 2338 * is left 0 to indicate accept transport defaults. The 2339 * driver communicates port role capabilities consistent 2340 * with the PRLI response data. 2341 */ 2342 memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info)); 2343 rpinfo.port_id = ndlp->nlp_DID; 2344 if (ndlp->nlp_type & NLP_NVME_TARGET) 2345 rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET; 2346 if (ndlp->nlp_type & NLP_NVME_INITIATOR) 2347 rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR; 2348 2349 if (ndlp->nlp_type & NLP_NVME_DISCOVERY) 2350 rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY; 2351 2352 rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); 2353 rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); 2354 if (srport) 2355 rpinfo.dev_loss_tmo = srport->dev_loss_tmo; 2356 else 2357 rpinfo.dev_loss_tmo = vport->cfg_devloss_tmo; 2358 2359 spin_lock_irq(&ndlp->lock); 2360 2361 /* If an oldrport exists, so does the ndlp reference. If not 2362 * a new reference is needed because either the node has never 2363 * been registered or it's been unregistered and getting deleted. 2364 */ 2365 oldrport = lpfc_ndlp_get_nrport(ndlp); 2366 if (oldrport) { 2367 prev_ndlp = oldrport->ndlp; 2368 spin_unlock_irq(&ndlp->lock); 2369 } else { 2370 spin_unlock_irq(&ndlp->lock); 2371 if (!lpfc_nlp_get(ndlp)) { 2372 dev_warn(&vport->phba->pcidev->dev, 2373 "Warning - No node ref - exit register\n"); 2374 return 0; 2375 } 2376 } 2377 2378 ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port); 2379 if (!ret) { 2380 /* If the ndlp already has an nrport, this is just 2381 * a resume of the existing rport. Else this is a 2382 * new rport. 2383 */ 2384 /* Guard against an unregister/reregister 2385 * race that leaves the WAIT flag set. 2386 */ 2387 spin_lock_irq(&ndlp->lock); 2388 ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT; 2389 ndlp->fc4_xpt_flags |= NVME_XPT_REGD; 2390 spin_unlock_irq(&ndlp->lock); 2391 rport = remote_port->private; 2392 if (oldrport) { 2393 2394 /* Sever the ndlp<->rport association 2395 * before dropping the ndlp ref from 2396 * register. 2397 */ 2398 spin_lock_irq(&ndlp->lock); 2399 ndlp->nrport = NULL; 2400 ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT; 2401 spin_unlock_irq(&ndlp->lock); 2402 rport->ndlp = NULL; 2403 rport->remoteport = NULL; 2404 2405 /* Reference only removed if previous NDLP is no longer 2406 * active. It might be just a swap and removing the 2407 * reference would cause a premature cleanup. 2408 */ 2409 if (prev_ndlp && prev_ndlp != ndlp) { 2410 if (!prev_ndlp->nrport) 2411 lpfc_nlp_put(prev_ndlp); 2412 } 2413 } 2414 2415 /* Clean bind the rport to the ndlp. */ 2416 rport->remoteport = remote_port; 2417 rport->lport = lport; 2418 rport->ndlp = ndlp; 2419 spin_lock_irq(&ndlp->lock); 2420 ndlp->nrport = rport; 2421 spin_unlock_irq(&ndlp->lock); 2422 lpfc_printf_vlog(vport, KERN_INFO, 2423 LOG_NVME_DISC | LOG_NODE, 2424 "6022 Bind lport x%px to remoteport x%px " 2425 "rport x%px WWNN 0x%llx, " 2426 "Rport WWPN 0x%llx DID " 2427 "x%06x Role x%x, ndlp %p prev_ndlp x%px\n", 2428 lport, remote_port, rport, 2429 rpinfo.node_name, rpinfo.port_name, 2430 rpinfo.port_id, rpinfo.port_role, 2431 ndlp, prev_ndlp); 2432 } else { 2433 lpfc_printf_vlog(vport, KERN_ERR, 2434 LOG_TRACE_EVENT, 2435 "6031 RemotePort Registration failed " 2436 "err: %d, DID x%06x\n", 2437 ret, ndlp->nlp_DID); 2438 } 2439 2440 return ret; 2441 #else 2442 return 0; 2443 #endif 2444 } 2445 2446 /* 2447 * lpfc_nvme_rescan_port - Check to see if we should rescan this remoteport 2448 * 2449 * If the ndlp represents an NVME Target, that we are logged into, 2450 * ping the NVME FC Transport layer to initiate a device rescan 2451 * on this remote NPort. 2452 */ 2453 void 2454 lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 2455 { 2456 #if (IS_ENABLED(CONFIG_NVME_FC)) 2457 struct lpfc_nvme_rport *nrport; 2458 struct nvme_fc_remote_port *remoteport = NULL; 2459 2460 spin_lock_irq(&ndlp->lock); 2461 nrport = lpfc_ndlp_get_nrport(ndlp); 2462 if (nrport) 2463 remoteport = nrport->remoteport; 2464 spin_unlock_irq(&ndlp->lock); 2465 2466 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 2467 "6170 Rescan NPort DID x%06x type x%x " 2468 "state x%x nrport x%px remoteport x%px\n", 2469 ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state, 2470 nrport, remoteport); 2471 2472 if (!nrport || !remoteport) 2473 goto rescan_exit; 2474 2475 /* Rescan an NVME target in MAPPED state with DISCOVERY role set */ 2476 if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY && 2477 ndlp->nlp_state == NLP_STE_MAPPED_NODE) { 2478 nvme_fc_rescan_remoteport(remoteport); 2479 2480 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 2481 "6172 NVME rescanned DID x%06x " 2482 "port_state x%x\n", 2483 ndlp->nlp_DID, remoteport->port_state); 2484 } 2485 return; 2486 rescan_exit: 2487 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 2488 "6169 Skip NVME Rport Rescan, NVME remoteport " 2489 "unregistered\n"); 2490 #endif 2491 } 2492 2493 /* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport. 2494 * 2495 * There is no notion of Devloss or rport recovery from the current 2496 * nvme_transport perspective. Loss of an rport just means IO cannot 2497 * be sent and recovery is completely up to the initator. 2498 * For now, the driver just unbinds the DID and port_role so that 2499 * no further IO can be issued. Changes are planned for later. 2500 * 2501 * Notes - the ndlp reference count is not decremented here since 2502 * since there is no nvme_transport api for devloss. Node ref count 2503 * is only adjusted in driver unload. 2504 */ 2505 void 2506 lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 2507 { 2508 #if (IS_ENABLED(CONFIG_NVME_FC)) 2509 int ret; 2510 struct nvme_fc_local_port *localport; 2511 struct lpfc_nvme_lport *lport; 2512 struct lpfc_nvme_rport *rport; 2513 struct nvme_fc_remote_port *remoteport = NULL; 2514 2515 localport = vport->localport; 2516 2517 /* This is fundamental error. The localport is always 2518 * available until driver unload. Just exit. 2519 */ 2520 if (!localport) 2521 return; 2522 2523 lport = (struct lpfc_nvme_lport *)localport->private; 2524 if (!lport) 2525 goto input_err; 2526 2527 spin_lock_irq(&ndlp->lock); 2528 rport = lpfc_ndlp_get_nrport(ndlp); 2529 if (rport) 2530 remoteport = rport->remoteport; 2531 spin_unlock_irq(&ndlp->lock); 2532 if (!remoteport) 2533 goto input_err; 2534 2535 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 2536 "6033 Unreg nvme remoteport x%px, portname x%llx, " 2537 "port_id x%06x, portstate x%x port type x%x " 2538 "refcnt %d\n", 2539 remoteport, remoteport->port_name, 2540 remoteport->port_id, remoteport->port_state, 2541 ndlp->nlp_type, kref_read(&ndlp->kref)); 2542 2543 /* Sanity check ndlp type. Only call for NVME ports. Don't 2544 * clear any rport state until the transport calls back. 2545 */ 2546 2547 if (ndlp->nlp_type & NLP_NVME_TARGET) { 2548 /* No concern about the role change on the nvme remoteport. 2549 * The transport will update it. 2550 */ 2551 spin_lock_irq(&vport->phba->hbalock); 2552 ndlp->fc4_xpt_flags |= NVME_XPT_UNREG_WAIT; 2553 spin_unlock_irq(&vport->phba->hbalock); 2554 2555 /* Don't let the host nvme transport keep sending keep-alives 2556 * on this remoteport. Vport is unloading, no recovery. The 2557 * return values is ignored. The upcall is a courtesy to the 2558 * transport. 2559 */ 2560 if (vport->load_flag & FC_UNLOADING) 2561 (void)nvme_fc_set_remoteport_devloss(remoteport, 0); 2562 2563 ret = nvme_fc_unregister_remoteport(remoteport); 2564 2565 /* The driver no longer knows if the nrport memory is valid. 2566 * because the controller teardown process has begun and 2567 * is asynchronous. Break the binding in the ndlp. Also 2568 * remove the register ndlp reference to setup node release. 2569 */ 2570 ndlp->nrport = NULL; 2571 lpfc_nlp_put(ndlp); 2572 if (ret != 0) { 2573 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2574 "6167 NVME unregister failed %d " 2575 "port_state x%x\n", 2576 ret, remoteport->port_state); 2577 } 2578 } 2579 return; 2580 2581 input_err: 2582 #endif 2583 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2584 "6168 State error: lport x%px, rport x%px FCID x%06x\n", 2585 vport->localport, ndlp->rport, ndlp->nlp_DID); 2586 } 2587 2588 /** 2589 * lpfc_sli4_nvme_pci_offline_aborted - Fast-path process of NVME xri abort 2590 * @phba: pointer to lpfc hba data structure. 2591 * @lpfc_ncmd: The nvme job structure for the request being aborted. 2592 * 2593 * This routine is invoked by the worker thread to process a SLI4 fast-path 2594 * NVME aborted xri. Aborted NVME IO commands are completed to the transport 2595 * here. 2596 **/ 2597 void 2598 lpfc_sli4_nvme_pci_offline_aborted(struct lpfc_hba *phba, 2599 struct lpfc_io_buf *lpfc_ncmd) 2600 { 2601 struct nvmefc_fcp_req *nvme_cmd = NULL; 2602 2603 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2604 "6533 %s nvme_cmd %p tag x%x abort complete and " 2605 "xri released\n", __func__, 2606 lpfc_ncmd->nvmeCmd, 2607 lpfc_ncmd->cur_iocbq.iotag); 2608 2609 /* Aborted NVME commands are required to not complete 2610 * before the abort exchange command fully completes. 2611 * Once completed, it is available via the put list. 2612 */ 2613 if (lpfc_ncmd->nvmeCmd) { 2614 nvme_cmd = lpfc_ncmd->nvmeCmd; 2615 nvme_cmd->transferred_length = 0; 2616 nvme_cmd->rcv_rsplen = 0; 2617 nvme_cmd->status = NVME_SC_INTERNAL; 2618 nvme_cmd->done(nvme_cmd); 2619 lpfc_ncmd->nvmeCmd = NULL; 2620 } 2621 lpfc_release_nvme_buf(phba, lpfc_ncmd); 2622 } 2623 2624 /** 2625 * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort 2626 * @phba: pointer to lpfc hba data structure. 2627 * @axri: pointer to the fcp xri abort wcqe structure. 2628 * @lpfc_ncmd: The nvme job structure for the request being aborted. 2629 * 2630 * This routine is invoked by the worker thread to process a SLI4 fast-path 2631 * NVME aborted xri. Aborted NVME IO commands are completed to the transport 2632 * here. 2633 **/ 2634 void 2635 lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba, 2636 struct sli4_wcqe_xri_aborted *axri, 2637 struct lpfc_io_buf *lpfc_ncmd) 2638 { 2639 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 2640 struct nvmefc_fcp_req *nvme_cmd = NULL; 2641 struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp; 2642 2643 2644 if (ndlp) 2645 lpfc_sli4_abts_err_handler(phba, ndlp, axri); 2646 2647 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2648 "6311 nvme_cmd %p xri x%x tag x%x abort complete and " 2649 "xri released\n", 2650 lpfc_ncmd->nvmeCmd, xri, 2651 lpfc_ncmd->cur_iocbq.iotag); 2652 2653 /* Aborted NVME commands are required to not complete 2654 * before the abort exchange command fully completes. 2655 * Once completed, it is available via the put list. 2656 */ 2657 if (lpfc_ncmd->nvmeCmd) { 2658 nvme_cmd = lpfc_ncmd->nvmeCmd; 2659 nvme_cmd->done(nvme_cmd); 2660 lpfc_ncmd->nvmeCmd = NULL; 2661 } 2662 lpfc_release_nvme_buf(phba, lpfc_ncmd); 2663 } 2664 2665 /** 2666 * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete 2667 * @phba: Pointer to HBA context object. 2668 * 2669 * This function flushes all wqes in the nvme rings and frees all resources 2670 * in the txcmplq. This function does not issue abort wqes for the IO 2671 * commands in txcmplq, they will just be returned with 2672 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI 2673 * slot has been permanently disabled. 2674 **/ 2675 void 2676 lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba) 2677 { 2678 struct lpfc_sli_ring *pring; 2679 u32 i, wait_cnt = 0; 2680 2681 if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq) 2682 return; 2683 2684 /* Cycle through all IO rings and make sure all outstanding 2685 * WQEs have been removed from the txcmplqs. 2686 */ 2687 for (i = 0; i < phba->cfg_hdw_queue; i++) { 2688 if (!phba->sli4_hba.hdwq[i].io_wq) 2689 continue; 2690 pring = phba->sli4_hba.hdwq[i].io_wq->pring; 2691 2692 if (!pring) 2693 continue; 2694 2695 /* Retrieve everything on the txcmplq */ 2696 while (!list_empty(&pring->txcmplq)) { 2697 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); 2698 wait_cnt++; 2699 2700 /* The sleep is 10mS. Every ten seconds, 2701 * dump a message. Something is wrong. 2702 */ 2703 if ((wait_cnt % 1000) == 0) { 2704 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2705 "6178 NVME IO not empty, " 2706 "cnt %d\n", wait_cnt); 2707 } 2708 } 2709 } 2710 2711 /* Make sure HBA is alive */ 2712 lpfc_issue_hb_tmo(phba); 2713 2714 } 2715 2716 void 2717 lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, 2718 uint32_t stat, uint32_t param) 2719 { 2720 #if (IS_ENABLED(CONFIG_NVME_FC)) 2721 struct lpfc_io_buf *lpfc_ncmd; 2722 struct nvmefc_fcp_req *nCmd; 2723 struct lpfc_wcqe_complete wcqe; 2724 struct lpfc_wcqe_complete *wcqep = &wcqe; 2725 2726 lpfc_ncmd = pwqeIn->io_buf; 2727 if (!lpfc_ncmd) { 2728 lpfc_sli_release_iocbq(phba, pwqeIn); 2729 return; 2730 } 2731 /* For abort iocb just return, IO iocb will do a done call */ 2732 if (bf_get(wqe_cmnd, &pwqeIn->wqe.gen_req.wqe_com) == 2733 CMD_ABORT_XRI_CX) { 2734 lpfc_sli_release_iocbq(phba, pwqeIn); 2735 return; 2736 } 2737 2738 spin_lock(&lpfc_ncmd->buf_lock); 2739 nCmd = lpfc_ncmd->nvmeCmd; 2740 if (!nCmd) { 2741 spin_unlock(&lpfc_ncmd->buf_lock); 2742 lpfc_release_nvme_buf(phba, lpfc_ncmd); 2743 return; 2744 } 2745 spin_unlock(&lpfc_ncmd->buf_lock); 2746 2747 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, 2748 "6194 NVME Cancel xri %x\n", 2749 lpfc_ncmd->cur_iocbq.sli4_xritag); 2750 2751 wcqep->word0 = 0; 2752 bf_set(lpfc_wcqe_c_status, wcqep, stat); 2753 wcqep->parameter = param; 2754 wcqep->word3 = 0; /* xb is 0 */ 2755 2756 /* Call release with XB=1 to queue the IO into the abort list. */ 2757 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 2758 bf_set(lpfc_wcqe_c_xb, wcqep, 1); 2759 2760 memcpy(&pwqeIn->wcqe_cmpl, wcqep, sizeof(*wcqep)); 2761 (pwqeIn->cmd_cmpl)(phba, pwqeIn, pwqeIn); 2762 #endif 2763 } 2764