/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
 * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
 * Copyright (C) 2004-2016 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <../drivers/nvme/host/nvme.h>
#include <linux/nvme-fc-driver.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
						 struct lpfc_nvmet_rcv_ctx *,
						 dma_addr_t rspbuf,
						 uint16_t rspsize);
static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
						  struct lpfc_nvmet_rcv_ctx *);
static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
					  struct lpfc_nvmet_rcv_ctx *,
					  uint32_t, uint16_t);
static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
					    struct lpfc_nvmet_rcv_ctx *,
					    uint32_t, uint16_t);
static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
					   struct lpfc_nvmet_rcv_ctx *,
					   uint32_t, uint16_t);

void
lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
{
	unsigned long iflag;

	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6313 NVMET Defer ctx release xri x%x flg x%x\n",
			ctxp->oxid, ctxp->flag);

	spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
	if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
				       iflag);
		return;
	}
	ctxp->flag |= LPFC_NVMET_CTX_RLS;
	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
	spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
}

/**
 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME LS commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_ls_req *rsp;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t status, result;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;
	if (!phba->targetport)
		goto out;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	if (status)
		atomic_inc(&tgtp->xmt_ls_rsp_error);
	else
		atomic_inc(&tgtp->xmt_ls_rsp_cmpl);

out:
	ctxp = cmdwqe->context2;
	rsp = &ctxp->ctx.ls_req;

	lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n",
			 ctxp->oxid, status, result);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6038 %s: Entrypoint: ctx %p status %x/%x\n", __func__,
			ctxp, status, result);

	lpfc_nlp_put(cmdwqe->context1);
	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	rsp->done(rsp);
	kfree(ctxp);
}

/**
 * lpfc_nvmet_rq_post - Repost an NVMET RQ DMA buffer and clean up context
 * @phba: HBA the buffer is associated with
 * @ctxp: context to clean up
 * @mp: Buffer to free
 *
 * Description: Frees the given DMA buffer by reposting it to its associated
 * RQ so it can be reused.
 *
 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
154 * 155 * Returns: None 156 **/ 157 void 158 lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp, 159 struct lpfc_dmabuf *mp) 160 { 161 if (ctxp) { 162 if (ctxp->flag) 163 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 164 "6314 rq_post ctx xri x%x flag x%x\n", 165 ctxp->oxid, ctxp->flag); 166 167 if (ctxp->txrdy) { 168 pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy, 169 ctxp->txrdy_phys); 170 ctxp->txrdy = NULL; 171 ctxp->txrdy_phys = 0; 172 } 173 ctxp->state = LPFC_NVMET_STE_FREE; 174 } 175 lpfc_rq_buf_free(phba, mp); 176 } 177 178 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 179 static void 180 lpfc_nvmet_ktime(struct lpfc_hba *phba, 181 struct lpfc_nvmet_rcv_ctx *ctxp) 182 { 183 uint64_t seg1, seg2, seg3, seg4, seg5; 184 uint64_t seg6, seg7, seg8, seg9, seg10; 185 186 if (!phba->ktime_on) 187 return; 188 189 if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme || 190 !ctxp->ts_nvme_data || !ctxp->ts_data_wqput || 191 !ctxp->ts_isr_data || !ctxp->ts_data_nvme || 192 !ctxp->ts_nvme_status || !ctxp->ts_status_wqput || 193 !ctxp->ts_isr_status || !ctxp->ts_status_nvme) 194 return; 195 196 if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme) 197 return; 198 if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data) 199 return; 200 if (ctxp->ts_nvme_data > ctxp->ts_data_wqput) 201 return; 202 if (ctxp->ts_data_wqput > ctxp->ts_isr_data) 203 return; 204 if (ctxp->ts_isr_data > ctxp->ts_data_nvme) 205 return; 206 if (ctxp->ts_data_nvme > ctxp->ts_nvme_status) 207 return; 208 if (ctxp->ts_nvme_status > ctxp->ts_status_wqput) 209 return; 210 if (ctxp->ts_status_wqput > ctxp->ts_isr_status) 211 return; 212 if (ctxp->ts_isr_status > ctxp->ts_status_nvme) 213 return; 214 /* 215 * Segment 1 - Time from FCP command received by MSI-X ISR 216 * to FCP command is passed to NVME Layer. 217 * Segment 2 - Time from FCP command payload handed 218 * off to NVME Layer to Driver receives a Command op 219 * from NVME Layer. 220 * Segment 3 - Time from Driver receives a Command op 221 * from NVME Layer to Command is put on WQ. 222 * Segment 4 - Time from Driver WQ put is done 223 * to MSI-X ISR for Command cmpl. 224 * Segment 5 - Time from MSI-X ISR for Command cmpl to 225 * Command cmpl is passed to NVME Layer. 226 * Segment 6 - Time from Command cmpl is passed to NVME 227 * Layer to Driver receives a RSP op from NVME Layer. 228 * Segment 7 - Time from Driver receives a RSP op from 229 * NVME Layer to WQ put is done on TRSP FCP Status. 230 * Segment 8 - Time from Driver WQ put is done on TRSP 231 * FCP Status to MSI-X ISR for TRSP cmpl. 232 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to 233 * TRSP cmpl is passed to NVME Layer. 234 * Segment 10 - Time from FCP command received by 235 * MSI-X ISR to command is completed on wire. 
236 * (Segments 1 thru 8) for READDATA / WRITEDATA 237 * (Segments 1 thru 4) for READDATA_RSP 238 */ 239 seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd; 240 seg2 = (ctxp->ts_nvme_data - ctxp->ts_isr_cmd) - seg1; 241 seg3 = (ctxp->ts_data_wqput - ctxp->ts_isr_cmd) - 242 seg1 - seg2; 243 seg4 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd) - 244 seg1 - seg2 - seg3; 245 seg5 = (ctxp->ts_data_nvme - ctxp->ts_isr_cmd) - 246 seg1 - seg2 - seg3 - seg4; 247 248 /* For auto rsp commands seg6 thru seg10 will be 0 */ 249 if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) { 250 seg6 = (ctxp->ts_nvme_status - 251 ctxp->ts_isr_cmd) - 252 seg1 - seg2 - seg3 - seg4 - seg5; 253 seg7 = (ctxp->ts_status_wqput - 254 ctxp->ts_isr_cmd) - 255 seg1 - seg2 - seg3 - 256 seg4 - seg5 - seg6; 257 seg8 = (ctxp->ts_isr_status - 258 ctxp->ts_isr_cmd) - 259 seg1 - seg2 - seg3 - seg4 - 260 seg5 - seg6 - seg7; 261 seg9 = (ctxp->ts_status_nvme - 262 ctxp->ts_isr_cmd) - 263 seg1 - seg2 - seg3 - seg4 - 264 seg5 - seg6 - seg7 - seg8; 265 seg10 = (ctxp->ts_isr_status - 266 ctxp->ts_isr_cmd); 267 } else { 268 seg6 = 0; 269 seg7 = 0; 270 seg8 = 0; 271 seg9 = 0; 272 seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd); 273 } 274 275 phba->ktime_seg1_total += seg1; 276 if (seg1 < phba->ktime_seg1_min) 277 phba->ktime_seg1_min = seg1; 278 else if (seg1 > phba->ktime_seg1_max) 279 phba->ktime_seg1_max = seg1; 280 281 phba->ktime_seg2_total += seg2; 282 if (seg2 < phba->ktime_seg2_min) 283 phba->ktime_seg2_min = seg2; 284 else if (seg2 > phba->ktime_seg2_max) 285 phba->ktime_seg2_max = seg2; 286 287 phba->ktime_seg3_total += seg3; 288 if (seg3 < phba->ktime_seg3_min) 289 phba->ktime_seg3_min = seg3; 290 else if (seg3 > phba->ktime_seg3_max) 291 phba->ktime_seg3_max = seg3; 292 293 phba->ktime_seg4_total += seg4; 294 if (seg4 < phba->ktime_seg4_min) 295 phba->ktime_seg4_min = seg4; 296 else if (seg4 > phba->ktime_seg4_max) 297 phba->ktime_seg4_max = seg4; 298 299 phba->ktime_seg5_total += seg5; 300 if (seg5 < phba->ktime_seg5_min) 301 phba->ktime_seg5_min = seg5; 302 else if (seg5 > phba->ktime_seg5_max) 303 phba->ktime_seg5_max = seg5; 304 305 phba->ktime_data_samples++; 306 if (!seg6) 307 goto out; 308 309 phba->ktime_seg6_total += seg6; 310 if (seg6 < phba->ktime_seg6_min) 311 phba->ktime_seg6_min = seg6; 312 else if (seg6 > phba->ktime_seg6_max) 313 phba->ktime_seg6_max = seg6; 314 315 phba->ktime_seg7_total += seg7; 316 if (seg7 < phba->ktime_seg7_min) 317 phba->ktime_seg7_min = seg7; 318 else if (seg7 > phba->ktime_seg7_max) 319 phba->ktime_seg7_max = seg7; 320 321 phba->ktime_seg8_total += seg8; 322 if (seg8 < phba->ktime_seg8_min) 323 phba->ktime_seg8_min = seg8; 324 else if (seg8 > phba->ktime_seg8_max) 325 phba->ktime_seg8_max = seg8; 326 327 phba->ktime_seg9_total += seg9; 328 if (seg9 < phba->ktime_seg9_min) 329 phba->ktime_seg9_min = seg9; 330 else if (seg9 > phba->ktime_seg9_max) 331 phba->ktime_seg9_max = seg9; 332 out: 333 phba->ktime_seg10_total += seg10; 334 if (seg10 < phba->ktime_seg10_min) 335 phba->ktime_seg10_min = seg10; 336 else if (seg10 > phba->ktime_seg10_max) 337 phba->ktime_seg10_max = seg10; 338 phba->ktime_status_samples++; 339 } 340 #endif 341 342 /** 343 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response 344 * @phba: Pointer to HBA context object. 345 * @cmdwqe: Pointer to driver command WQE object. 346 * @wcqe: Pointer to driver response CQE object. 347 * 348 * The function is called from SLI ring event handler with no 349 * lock held. 
This function is the completion handler for NVME FCP commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_fcp_req *rsp;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t status, result, op, start_clean;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t id;
#endif

	ctxp = cmdwqe->context2;
	ctxp->flag &= ~LPFC_NVMET_IO_INP;

	rsp = &ctxp->ctx.fcp_req;
	op = rsp->op;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	if (phba->targetport)
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	else
		tgtp = NULL;

	lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
			 ctxp->oxid, op, status);

	if (status) {
		rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
		rsp->transferred_length = 0;
		if (tgtp)
			atomic_inc(&tgtp->xmt_fcp_rsp_error);

		/* pick up SLI4 exchange busy condition */
		if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
			ctxp->flag |= LPFC_NVMET_XBUSY;

			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
					"6315 IO Cmpl XBUSY: xri x%x: %x/%x\n",
					ctxp->oxid, status, result);
		} else {
			ctxp->flag &= ~LPFC_NVMET_XBUSY;
		}

	} else {
		rsp->fcp_error = NVME_SC_SUCCESS;
		if (op == NVMET_FCOP_RSP)
			rsp->transferred_length = rsp->rsplen;
		else
			rsp->transferred_length = rsp->transfer_length;
		if (tgtp)
			atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
	}

	if ((op == NVMET_FCOP_READDATA_RSP) ||
	    (op == NVMET_FCOP_RSP)) {
		/* Sanity check */
		ctxp->state = LPFC_NVMET_STE_DONE;
		ctxp->entry_cnt++;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (phba->ktime_on) {
			if (rsp->op == NVMET_FCOP_READDATA_RSP) {
				ctxp->ts_isr_data =
					cmdwqe->isr_timestamp;
				ctxp->ts_data_nvme =
					ktime_get_ns();
				ctxp->ts_nvme_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_wqput =
					ctxp->ts_data_nvme;
				ctxp->ts_isr_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_nvme =
					ctxp->ts_data_nvme;
			} else {
				ctxp->ts_isr_status =
					cmdwqe->isr_timestamp;
				ctxp->ts_status_nvme =
					ktime_get_ns();
			}
		}
		if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
			id = smp_processor_id();
			if (ctxp->cpu != id)
				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
						"6703 CPU Check cmpl: "
						"cpu %d expect %d\n",
						id, ctxp->cpu);
			if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
				phba->cpucheck_cmpl_io[id]++;
		}
#endif
		rsp->done(rsp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (phba->ktime_on)
			lpfc_nvmet_ktime(phba, ctxp);
#endif
		/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
	} else {
		ctxp->entry_cnt++;
		start_clean = offsetof(struct lpfc_iocbq, wqe);
		memset(((char *)cmdwqe) + start_clean, 0,
		       (sizeof(struct lpfc_iocbq) - start_clean));
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (phba->ktime_on) {
			ctxp->ts_isr_data = cmdwqe->isr_timestamp;
			ctxp->ts_data_nvme = ktime_get_ns();
		}
		if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
			id = smp_processor_id();
			if (ctxp->cpu != id)
				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
						"6704 CPU Check cmdcmpl: "
						"cpu %d expect %d\n",
						id, ctxp->cpu);
			if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
				phba->cpucheck_ccmpl_io[id]++;
		}
#endif
		rsp->done(rsp);
475 } 476 } 477 478 static int 479 lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport, 480 struct nvmefc_tgt_ls_req *rsp) 481 { 482 struct lpfc_nvmet_rcv_ctx *ctxp = 483 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req); 484 struct lpfc_hba *phba = ctxp->phba; 485 struct hbq_dmabuf *nvmebuf = 486 (struct hbq_dmabuf *)ctxp->rqb_buffer; 487 struct lpfc_iocbq *nvmewqeq; 488 struct lpfc_nvmet_tgtport *nvmep = tgtport->private; 489 struct lpfc_dmabuf dmabuf; 490 struct ulp_bde64 bpl; 491 int rc; 492 493 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, 494 "6023 %s: Entrypoint ctx %p %p\n", __func__, 495 ctxp, tgtport); 496 497 nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma, 498 rsp->rsplen); 499 if (nvmewqeq == NULL) { 500 atomic_inc(&nvmep->xmt_ls_drop); 501 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 502 "6150 LS Drop IO x%x: Prep\n", 503 ctxp->oxid); 504 lpfc_in_buf_free(phba, &nvmebuf->dbuf); 505 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, 506 ctxp->sid, ctxp->oxid); 507 return -ENOMEM; 508 } 509 510 /* Save numBdes for bpl2sgl */ 511 nvmewqeq->rsvd2 = 1; 512 nvmewqeq->hba_wqidx = 0; 513 nvmewqeq->context3 = &dmabuf; 514 dmabuf.virt = &bpl; 515 bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow; 516 bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh; 517 bpl.tus.f.bdeSize = rsp->rsplen; 518 bpl.tus.f.bdeFlags = 0; 519 bpl.tus.w = le32_to_cpu(bpl.tus.w); 520 521 nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp; 522 nvmewqeq->iocb_cmpl = NULL; 523 nvmewqeq->context2 = ctxp; 524 525 lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n", 526 ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen); 527 528 rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq); 529 if (rc == WQE_SUCCESS) { 530 /* 531 * Okay to repost buffer here, but wait till cmpl 532 * before freeing ctxp and iocbq. 
533 */ 534 lpfc_in_buf_free(phba, &nvmebuf->dbuf); 535 ctxp->rqb_buffer = 0; 536 atomic_inc(&nvmep->xmt_ls_rsp); 537 return 0; 538 } 539 /* Give back resources */ 540 atomic_inc(&nvmep->xmt_ls_drop); 541 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 542 "6151 LS Drop IO x%x: Issue %d\n", 543 ctxp->oxid, rc); 544 545 lpfc_nlp_put(nvmewqeq->context1); 546 547 lpfc_in_buf_free(phba, &nvmebuf->dbuf); 548 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); 549 return -ENXIO; 550 } 551 552 static int 553 lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, 554 struct nvmefc_tgt_fcp_req *rsp) 555 { 556 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private; 557 struct lpfc_nvmet_rcv_ctx *ctxp = 558 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); 559 struct lpfc_hba *phba = ctxp->phba; 560 struct lpfc_iocbq *nvmewqeq; 561 int rc; 562 563 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 564 if (phba->ktime_on) { 565 if (rsp->op == NVMET_FCOP_RSP) 566 ctxp->ts_nvme_status = ktime_get_ns(); 567 else 568 ctxp->ts_nvme_data = ktime_get_ns(); 569 } 570 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { 571 int id = smp_processor_id(); 572 ctxp->cpu = id; 573 if (id < LPFC_CHECK_CPU_CNT) 574 phba->cpucheck_xmt_io[id]++; 575 if (rsp->hwqid != id) { 576 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 577 "6705 CPU Check OP: " 578 "cpu %d expect %d\n", 579 id, rsp->hwqid); 580 ctxp->cpu = rsp->hwqid; 581 } 582 } 583 #endif 584 585 /* Sanity check */ 586 if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) || 587 (ctxp->state == LPFC_NVMET_STE_ABORT)) { 588 atomic_inc(&lpfc_nvmep->xmt_fcp_drop); 589 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 590 "6102 IO xri x%x aborted\n", 591 ctxp->oxid); 592 rc = -ENXIO; 593 goto aerr; 594 } 595 596 nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp); 597 if (nvmewqeq == NULL) { 598 atomic_inc(&lpfc_nvmep->xmt_fcp_drop); 599 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 600 "6152 FCP Drop IO x%x: Prep\n", 601 ctxp->oxid); 602 rc = -ENXIO; 603 goto aerr; 604 } 605 606 nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp; 607 nvmewqeq->iocb_cmpl = NULL; 608 nvmewqeq->context2 = ctxp; 609 nvmewqeq->iocb_flag |= LPFC_IO_NVMET; 610 ctxp->wqeq->hba_wqidx = rsp->hwqid; 611 612 lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n", 613 ctxp->oxid, rsp->op, rsp->rsplen); 614 615 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq); 616 if (rc == WQE_SUCCESS) { 617 ctxp->flag |= LPFC_NVMET_IO_INP; 618 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 619 if (!phba->ktime_on) 620 return 0; 621 if (rsp->op == NVMET_FCOP_RSP) 622 ctxp->ts_status_wqput = ktime_get_ns(); 623 else 624 ctxp->ts_data_wqput = ktime_get_ns(); 625 #endif 626 return 0; 627 } 628 629 /* Give back resources */ 630 atomic_inc(&lpfc_nvmep->xmt_fcp_drop); 631 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 632 "6153 FCP Drop IO x%x: Issue: %d\n", 633 ctxp->oxid, rc); 634 635 ctxp->wqeq->hba_wqidx = 0; 636 nvmewqeq->context2 = NULL; 637 nvmewqeq->context3 = NULL; 638 rc = -EBUSY; 639 aerr: 640 return rc; 641 } 642 643 static void 644 lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport) 645 { 646 struct lpfc_nvmet_tgtport *tport = targetport->private; 647 648 /* release any threads waiting for the unreg to complete */ 649 complete(&tport->tport_unreg_done); 650 } 651 652 static void 653 lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport, 654 struct nvmefc_tgt_fcp_req *req) 655 { 656 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private; 657 struct lpfc_nvmet_rcv_ctx *ctxp = 658 
container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); 659 struct lpfc_hba *phba = ctxp->phba; 660 unsigned long flags; 661 662 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 663 "6103 Abort op: oxri x%x flg x%x cnt %d\n", 664 ctxp->oxid, ctxp->flag, ctxp->entry_cnt); 665 666 lpfc_nvmeio_data(phba, "NVMET FCP ABRT: " 667 "xri x%x flg x%x cnt x%x\n", 668 ctxp->oxid, ctxp->flag, ctxp->entry_cnt); 669 670 atomic_inc(&lpfc_nvmep->xmt_fcp_abort); 671 ctxp->entry_cnt++; 672 spin_lock_irqsave(&ctxp->ctxlock, flags); 673 674 /* Since iaab/iaar are NOT set, we need to check 675 * if the firmware is in process of aborting IO 676 */ 677 if (ctxp->flag & LPFC_NVMET_XBUSY) { 678 spin_unlock_irqrestore(&ctxp->ctxlock, flags); 679 return; 680 } 681 ctxp->flag |= LPFC_NVMET_ABORT_OP; 682 if (ctxp->flag & LPFC_NVMET_IO_INP) 683 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid, 684 ctxp->oxid); 685 else 686 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, 687 ctxp->oxid); 688 spin_unlock_irqrestore(&ctxp->ctxlock, flags); 689 } 690 691 static void 692 lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport, 693 struct nvmefc_tgt_fcp_req *rsp) 694 { 695 struct lpfc_nvmet_rcv_ctx *ctxp = 696 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); 697 struct lpfc_hba *phba = ctxp->phba; 698 unsigned long flags; 699 bool aborting = false; 700 701 spin_lock_irqsave(&ctxp->ctxlock, flags); 702 if ((ctxp->flag & LPFC_NVMET_ABORT_OP) || 703 (ctxp->flag & LPFC_NVMET_XBUSY)) { 704 aborting = true; 705 /* let the abort path do the real release */ 706 lpfc_nvmet_defer_release(phba, ctxp); 707 } 708 spin_unlock_irqrestore(&ctxp->ctxlock, flags); 709 710 lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d\n", ctxp->oxid, 711 ctxp->state, 0); 712 713 if (aborting) 714 return; 715 716 lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf); 717 } 718 719 static struct nvmet_fc_target_template lpfc_tgttemplate = { 720 .targetport_delete = lpfc_nvmet_targetport_delete, 721 .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp, 722 .fcp_op = lpfc_nvmet_xmt_fcp_op, 723 .fcp_abort = lpfc_nvmet_xmt_fcp_abort, 724 .fcp_req_release = lpfc_nvmet_xmt_fcp_release, 725 726 .max_hw_queues = 1, 727 .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS, 728 .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS, 729 .dma_boundary = 0xFFFFFFFF, 730 731 /* optional features */ 732 .target_features = 0, 733 /* sizes of additional private data for data structures */ 734 .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport), 735 }; 736 737 int 738 lpfc_nvmet_create_targetport(struct lpfc_hba *phba) 739 { 740 struct lpfc_vport *vport = phba->pport; 741 struct lpfc_nvmet_tgtport *tgtp; 742 struct nvmet_fc_port_info pinfo; 743 int error = 0; 744 745 if (phba->targetport) 746 return 0; 747 748 memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info)); 749 pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn); 750 pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn); 751 pinfo.port_id = vport->fc_myDID; 752 753 /* Limit to LPFC_MAX_NVME_SEG_CNT. 754 * For now need + 1 to get around NVME transport logic. 
755 */ 756 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) { 757 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, 758 "6400 Reducing sg segment cnt to %d\n", 759 LPFC_MAX_NVME_SEG_CNT); 760 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 761 } else { 762 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; 763 } 764 lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; 765 lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel; 766 lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP | 767 NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED | 768 NVMET_FCTGTFEAT_CMD_IN_ISR | 769 NVMET_FCTGTFEAT_OPDONE_IN_ISR; 770 771 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 772 error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate, 773 &phba->pcidev->dev, 774 &phba->targetport); 775 #else 776 error = -ENOMEM; 777 #endif 778 if (error) { 779 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, 780 "6025 Cannot register NVME targetport " 781 "x%x\n", error); 782 phba->targetport = NULL; 783 } else { 784 tgtp = (struct lpfc_nvmet_tgtport *) 785 phba->targetport->private; 786 tgtp->phba = phba; 787 788 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, 789 "6026 Registered NVME " 790 "targetport: %p, private %p " 791 "portnm %llx nodenm %llx\n", 792 phba->targetport, tgtp, 793 pinfo.port_name, pinfo.node_name); 794 795 atomic_set(&tgtp->rcv_ls_req_in, 0); 796 atomic_set(&tgtp->rcv_ls_req_out, 0); 797 atomic_set(&tgtp->rcv_ls_req_drop, 0); 798 atomic_set(&tgtp->xmt_ls_abort, 0); 799 atomic_set(&tgtp->xmt_ls_rsp, 0); 800 atomic_set(&tgtp->xmt_ls_drop, 0); 801 atomic_set(&tgtp->xmt_ls_rsp_error, 0); 802 atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0); 803 atomic_set(&tgtp->rcv_fcp_cmd_in, 0); 804 atomic_set(&tgtp->rcv_fcp_cmd_out, 0); 805 atomic_set(&tgtp->rcv_fcp_cmd_drop, 0); 806 atomic_set(&tgtp->xmt_fcp_abort, 0); 807 atomic_set(&tgtp->xmt_fcp_drop, 0); 808 atomic_set(&tgtp->xmt_fcp_read_rsp, 0); 809 atomic_set(&tgtp->xmt_fcp_read, 0); 810 atomic_set(&tgtp->xmt_fcp_write, 0); 811 atomic_set(&tgtp->xmt_fcp_rsp, 0); 812 atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0); 813 atomic_set(&tgtp->xmt_fcp_rsp_error, 0); 814 atomic_set(&tgtp->xmt_fcp_rsp_drop, 0); 815 atomic_set(&tgtp->xmt_abort_rsp, 0); 816 atomic_set(&tgtp->xmt_abort_rsp_error, 0); 817 atomic_set(&tgtp->xmt_abort_cmpl, 0); 818 } 819 return error; 820 } 821 822 int 823 lpfc_nvmet_update_targetport(struct lpfc_hba *phba) 824 { 825 struct lpfc_vport *vport = phba->pport; 826 827 if (!phba->targetport) 828 return 0; 829 830 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, 831 "6007 Update NVMET port %p did x%x\n", 832 phba->targetport, vport->fc_myDID); 833 834 phba->targetport->port_id = vport->fc_myDID; 835 return 0; 836 } 837 838 /** 839 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort 840 * @phba: pointer to lpfc hba data structure. 841 * @axri: pointer to the nvmet xri abort wcqe structure. 842 * 843 * This routine is invoked by the worker thread to process a SLI4 fast-path 844 * NVMET aborted xri. 
845 **/ 846 void 847 lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, 848 struct sli4_wcqe_xri_aborted *axri) 849 { 850 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 851 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); 852 struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp; 853 struct lpfc_nodelist *ndlp; 854 unsigned long iflag = 0; 855 int rrq_empty = 0; 856 bool released = false; 857 858 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 859 "6317 XB aborted xri x%x rxid x%x\n", xri, rxid); 860 861 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) 862 return; 863 spin_lock_irqsave(&phba->hbalock, iflag); 864 spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); 865 list_for_each_entry_safe(ctxp, next_ctxp, 866 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, 867 list) { 868 if (ctxp->rqb_buffer->sglq->sli4_xritag != xri) 869 continue; 870 871 /* Check if we already received a free context call 872 * and we have completed processing an abort situation. 873 */ 874 if (ctxp->flag & LPFC_NVMET_CTX_RLS && 875 !(ctxp->flag & LPFC_NVMET_ABORT_OP)) { 876 list_del(&ctxp->list); 877 released = true; 878 } 879 ctxp->flag &= ~LPFC_NVMET_XBUSY; 880 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); 881 882 rrq_empty = list_empty(&phba->active_rrq_list); 883 spin_unlock_irqrestore(&phba->hbalock, iflag); 884 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); 885 if (ndlp && NLP_CHK_NODE_ACT(ndlp) && 886 (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE || 887 ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { 888 lpfc_set_rrq_active(phba, ndlp, 889 ctxp->rqb_buffer->sglq->sli4_lxritag, 890 rxid, 1); 891 lpfc_sli4_abts_err_handler(phba, ndlp, axri); 892 } 893 894 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 895 "6318 XB aborted %x flg x%x (%x)\n", 896 ctxp->oxid, ctxp->flag, released); 897 if (released) 898 lpfc_nvmet_rq_post(phba, ctxp, 899 &ctxp->rqb_buffer->hbuf); 900 if (rrq_empty) 901 lpfc_worker_wake_up(phba); 902 return; 903 } 904 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); 905 spin_unlock_irqrestore(&phba->hbalock, iflag); 906 } 907 908 int 909 lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, 910 struct fc_frame_header *fc_hdr) 911 912 { 913 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 914 struct lpfc_hba *phba = vport->phba; 915 struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp; 916 struct nvmefc_tgt_fcp_req *rsp; 917 uint16_t xri; 918 unsigned long iflag = 0; 919 920 xri = be16_to_cpu(fc_hdr->fh_ox_id); 921 922 spin_lock_irqsave(&phba->hbalock, iflag); 923 spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); 924 list_for_each_entry_safe(ctxp, next_ctxp, 925 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, 926 list) { 927 if (ctxp->rqb_buffer->sglq->sli4_xritag != xri) 928 continue; 929 930 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); 931 spin_unlock_irqrestore(&phba->hbalock, iflag); 932 933 spin_lock_irqsave(&ctxp->ctxlock, iflag); 934 ctxp->flag |= LPFC_NVMET_ABTS_RCV; 935 spin_unlock_irqrestore(&ctxp->ctxlock, iflag); 936 937 lpfc_nvmeio_data(phba, 938 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n", 939 xri, smp_processor_id(), 0); 940 941 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 942 "6319 NVMET Rcv ABTS:acc xri x%x\n", xri); 943 944 rsp = &ctxp->ctx.fcp_req; 945 nvmet_fc_rcv_fcp_abort(phba->targetport, rsp); 946 947 /* Respond with BA_ACC accordingly */ 948 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1); 949 return 0; 950 } 951 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); 952 spin_unlock_irqrestore(&phba->hbalock, iflag); 953 954 lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt 
%d\n", 955 xri, smp_processor_id(), 1); 956 957 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 958 "6320 NVMET Rcv ABTS:rjt xri x%x\n", xri); 959 960 /* Respond with BA_RJT accordingly */ 961 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0); 962 #endif 963 return 0; 964 } 965 966 void 967 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) 968 { 969 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 970 struct lpfc_nvmet_tgtport *tgtp; 971 972 if (phba->nvmet_support == 0) 973 return; 974 if (phba->targetport) { 975 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 976 init_completion(&tgtp->tport_unreg_done); 977 nvmet_fc_unregister_targetport(phba->targetport); 978 wait_for_completion_timeout(&tgtp->tport_unreg_done, 5); 979 } 980 phba->targetport = NULL; 981 #endif 982 } 983 984 /** 985 * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer 986 * @phba: pointer to lpfc hba data structure. 987 * @pring: pointer to a SLI ring. 988 * @nvmebuf: pointer to lpfc nvme command HBQ data structure. 989 * 990 * This routine is used for processing the WQE associated with a unsolicited 991 * event. It first determines whether there is an existing ndlp that matches 992 * the DID from the unsolicited WQE. If not, it will create a new one with 993 * the DID from the unsolicited WQE. The ELS command from the unsolicited 994 * WQE is then used to invoke the proper routine and to set up proper state 995 * of the discovery state machine. 996 **/ 997 static void 998 lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 999 struct hbq_dmabuf *nvmebuf) 1000 { 1001 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 1002 struct lpfc_nvmet_tgtport *tgtp; 1003 struct fc_frame_header *fc_hdr; 1004 struct lpfc_nvmet_rcv_ctx *ctxp; 1005 uint32_t *payload; 1006 uint32_t size, oxid, sid, rc; 1007 1008 if (!nvmebuf || !phba->targetport) { 1009 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1010 "6154 LS Drop IO\n"); 1011 oxid = 0; 1012 size = 0; 1013 sid = 0; 1014 goto dropit; 1015 } 1016 1017 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 1018 payload = (uint32_t *)(nvmebuf->dbuf.virt); 1019 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); 1020 size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl); 1021 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 1022 sid = sli4_sid_from_fc_hdr(fc_hdr); 1023 1024 ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC); 1025 if (ctxp == NULL) { 1026 atomic_inc(&tgtp->rcv_ls_req_drop); 1027 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1028 "6155 LS Drop IO x%x: Alloc\n", 1029 oxid); 1030 dropit: 1031 lpfc_nvmeio_data(phba, "NVMET LS DROP: " 1032 "xri x%x sz %d from %06x\n", 1033 oxid, size, sid); 1034 if (nvmebuf) 1035 lpfc_in_buf_free(phba, &nvmebuf->dbuf); 1036 return; 1037 } 1038 ctxp->phba = phba; 1039 ctxp->size = size; 1040 ctxp->oxid = oxid; 1041 ctxp->sid = sid; 1042 ctxp->wqeq = NULL; 1043 ctxp->state = LPFC_NVMET_STE_RCV; 1044 ctxp->rqb_buffer = (void *)nvmebuf; 1045 1046 lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n", 1047 oxid, size, sid); 1048 /* 1049 * The calling sequence should be: 1050 * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp ->_req->done 1051 * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp. 
1052 */ 1053 atomic_inc(&tgtp->rcv_ls_req_in); 1054 rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req, 1055 payload, size); 1056 1057 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, 1058 "6037 %s: ctx %p sz %d rc %d: %08x %08x %08x " 1059 "%08x %08x %08x\n", __func__, ctxp, size, rc, 1060 *payload, *(payload+1), *(payload+2), 1061 *(payload+3), *(payload+4), *(payload+5)); 1062 1063 if (rc == 0) { 1064 atomic_inc(&tgtp->rcv_ls_req_out); 1065 return; 1066 } 1067 1068 lpfc_nvmeio_data(phba, "NVMET LS DROP: xri x%x sz %d from %06x\n", 1069 oxid, size, sid); 1070 1071 atomic_inc(&tgtp->rcv_ls_req_drop); 1072 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1073 "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n", 1074 ctxp->oxid, rc); 1075 1076 /* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */ 1077 if (nvmebuf) 1078 lpfc_in_buf_free(phba, &nvmebuf->dbuf); 1079 1080 atomic_inc(&tgtp->xmt_ls_abort); 1081 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid); 1082 #endif 1083 } 1084 1085 /** 1086 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer 1087 * @phba: pointer to lpfc hba data structure. 1088 * @pring: pointer to a SLI ring. 1089 * @nvmebuf: pointer to lpfc nvme command HBQ data structure. 1090 * 1091 * This routine is used for processing the WQE associated with a unsolicited 1092 * event. It first determines whether there is an existing ndlp that matches 1093 * the DID from the unsolicited WQE. If not, it will create a new one with 1094 * the DID from the unsolicited WQE. The ELS command from the unsolicited 1095 * WQE is then used to invoke the proper routine and to set up proper state 1096 * of the discovery state machine. 1097 **/ 1098 static void 1099 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, 1100 struct lpfc_sli_ring *pring, 1101 struct rqb_dmabuf *nvmebuf, 1102 uint64_t isr_timestamp) 1103 { 1104 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 1105 struct lpfc_nvmet_rcv_ctx *ctxp; 1106 struct lpfc_nvmet_tgtport *tgtp; 1107 struct fc_frame_header *fc_hdr; 1108 uint32_t *payload; 1109 uint32_t size, oxid, sid, rc; 1110 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1111 uint32_t id; 1112 #endif 1113 1114 if (!nvmebuf || !phba->targetport) { 1115 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1116 "6157 FCP Drop IO\n"); 1117 oxid = 0; 1118 size = 0; 1119 sid = 0; 1120 goto dropit; 1121 } 1122 1123 1124 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 1125 payload = (uint32_t *)(nvmebuf->dbuf.virt); 1126 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); 1127 size = nvmebuf->bytes_recv; 1128 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 1129 sid = sli4_sid_from_fc_hdr(fc_hdr); 1130 1131 ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmebuf->context; 1132 if (ctxp == NULL) { 1133 atomic_inc(&tgtp->rcv_fcp_cmd_drop); 1134 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1135 "6158 FCP Drop IO x%x: Alloc\n", 1136 oxid); 1137 lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf); 1138 /* Cannot send ABTS without context */ 1139 return; 1140 } 1141 memset(ctxp, 0, sizeof(ctxp->ctx)); 1142 ctxp->wqeq = NULL; 1143 ctxp->txrdy = NULL; 1144 ctxp->offset = 0; 1145 ctxp->phba = phba; 1146 ctxp->size = size; 1147 ctxp->oxid = oxid; 1148 ctxp->sid = sid; 1149 ctxp->state = LPFC_NVMET_STE_RCV; 1150 ctxp->rqb_buffer = nvmebuf; 1151 ctxp->entry_cnt = 1; 1152 ctxp->flag = 0; 1153 spin_lock_init(&ctxp->ctxlock); 1154 1155 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1156 if (phba->ktime_on) { 1157 ctxp->ts_isr_cmd = isr_timestamp; 1158 ctxp->ts_cmd_nvme = ktime_get_ns(); 1159 ctxp->ts_nvme_data = 0; 
		ctxp->ts_data_wqput = 0;
		ctxp->ts_isr_data = 0;
		ctxp->ts_data_nvme = 0;
		ctxp->ts_nvme_status = 0;
		ctxp->ts_status_wqput = 0;
		ctxp->ts_isr_status = 0;
		ctxp->ts_status_nvme = 0;
	}

	if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
		id = smp_processor_id();
		if (id < LPFC_CHECK_CPU_CNT)
			phba->cpucheck_rcv_io[id]++;
	}
#endif

	lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
			 oxid, size, smp_processor_id());

	atomic_inc(&tgtp->rcv_fcp_cmd_in);
	/*
	 * The calling sequence should be:
	 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
	 */
	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
				  payload, size);

	/* Process FCP command */
	if (rc == 0) {
		atomic_inc(&tgtp->rcv_fcp_cmd_out);
		return;
	}

	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6159 FCP Drop IO x%x: err x%x\n",
			ctxp->oxid, rc);
dropit:
	lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
			 oxid, size, sid);
	if (oxid) {
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
		return;
	}

	if (nvmebuf) {
		nvmebuf->iocbq->hba_wqidx = 0;
		/* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
		lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
	}
#endif
}

/**
 * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to received nvme data structure.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from the
 * SLI RQ on which the unsolicited event was received.
 **/
void
lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_iocbq *piocb)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *nvmebuf;

	d_buf = piocb->context2;
	nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);

	if (phba->nvmet_support == 0) {
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		return;
	}
	lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
}

/**
 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to received nvme data structure.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
 * SLI RQ on which the unsolicited event was received.
1254 **/ 1255 void 1256 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, 1257 struct lpfc_sli_ring *pring, 1258 struct rqb_dmabuf *nvmebuf, 1259 uint64_t isr_timestamp) 1260 { 1261 if (phba->nvmet_support == 0) { 1262 lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf); 1263 return; 1264 } 1265 lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf, 1266 isr_timestamp); 1267 } 1268 1269 /** 1270 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure 1271 * @phba: pointer to a host N_Port data structure. 1272 * @ctxp: Context info for NVME LS Request 1273 * @rspbuf: DMA buffer of NVME command. 1274 * @rspsize: size of the NVME command. 1275 * 1276 * This routine is used for allocating a lpfc-WQE data structure from 1277 * the driver lpfc-WQE free-list and prepare the WQE with the parameters 1278 * passed into the routine for discovery state machine to issue an Extended 1279 * Link Service (NVME) commands. It is a generic lpfc-WQE allocation 1280 * and preparation routine that is used by all the discovery state machine 1281 * routines and the NVME command-specific fields will be later set up by 1282 * the individual discovery machine routines after calling this routine 1283 * allocating and preparing a generic WQE data structure. It fills in the 1284 * Buffer Descriptor Entries (BDEs), allocates buffers for both command 1285 * payload and response payload (if expected). The reference count on the 1286 * ndlp is incremented by 1 and the reference to the ndlp is put into 1287 * context1 of the WQE data structure for this WQE to hold the ndlp 1288 * reference for the command's callback function to access later. 1289 * 1290 * Return code 1291 * Pointer to the newly allocated/prepared nvme wqe data structure 1292 * NULL - when nvme wqe data structure allocation/preparation failed 1293 **/ 1294 static struct lpfc_iocbq * 1295 lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba, 1296 struct lpfc_nvmet_rcv_ctx *ctxp, 1297 dma_addr_t rspbuf, uint16_t rspsize) 1298 { 1299 struct lpfc_nodelist *ndlp; 1300 struct lpfc_iocbq *nvmewqe; 1301 union lpfc_wqe *wqe; 1302 1303 if (!lpfc_is_link_up(phba)) { 1304 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, 1305 "6104 lpfc_nvmet_prep_ls_wqe: link err: " 1306 "NPORT x%x oxid:x%x\n", 1307 ctxp->sid, ctxp->oxid); 1308 return NULL; 1309 } 1310 1311 /* Allocate buffer for command wqe */ 1312 nvmewqe = lpfc_sli_get_iocbq(phba); 1313 if (nvmewqe == NULL) { 1314 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, 1315 "6105 lpfc_nvmet_prep_ls_wqe: No WQE: " 1316 "NPORT x%x oxid:x%x\n", 1317 ctxp->sid, ctxp->oxid); 1318 return NULL; 1319 } 1320 1321 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); 1322 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || 1323 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 1324 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { 1325 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, 1326 "6106 lpfc_nvmet_prep_ls_wqe: No ndlp: " 1327 "NPORT x%x oxid:x%x\n", 1328 ctxp->sid, ctxp->oxid); 1329 goto nvme_wqe_free_wqeq_exit; 1330 } 1331 ctxp->wqeq = nvmewqe; 1332 1333 /* prevent preparing wqe with NULL ndlp reference */ 1334 nvmewqe->context1 = lpfc_nlp_get(ndlp); 1335 if (nvmewqe->context1 == NULL) 1336 goto nvme_wqe_free_wqeq_exit; 1337 nvmewqe->context2 = ctxp; 1338 1339 wqe = &nvmewqe->wqe; 1340 memset(wqe, 0, sizeof(union lpfc_wqe)); 1341 1342 /* Words 0 - 2 */ 1343 wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1344 wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize; 1345 wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf)); 1346 
wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf)); 1347 1348 /* Word 3 */ 1349 1350 /* Word 4 */ 1351 1352 /* Word 5 */ 1353 bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0); 1354 bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1); 1355 bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0); 1356 bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP); 1357 bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME); 1358 1359 /* Word 6 */ 1360 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, 1361 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 1362 bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag); 1363 1364 /* Word 7 */ 1365 bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com, 1366 CMD_XMIT_SEQUENCE64_WQE); 1367 bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI); 1368 bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3); 1369 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); 1370 1371 /* Word 8 */ 1372 wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag; 1373 1374 /* Word 9 */ 1375 bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag); 1376 /* Needs to be set by caller */ 1377 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid); 1378 1379 /* Word 10 */ 1380 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); 1381 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE); 1382 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, 1383 LPFC_WQE_LENLOC_WORD12); 1384 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); 1385 1386 /* Word 11 */ 1387 bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com, 1388 LPFC_WQE_CQ_ID_DEFAULT); 1389 bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com, 1390 OTHER_COMMAND); 1391 1392 /* Word 12 */ 1393 wqe->xmit_sequence.xmit_len = rspsize; 1394 1395 nvmewqe->retry = 1; 1396 nvmewqe->vport = phba->pport; 1397 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT; 1398 nvmewqe->iocb_flag |= LPFC_IO_NVME_LS; 1399 1400 /* Xmit NVME response to remote NPORT <did> */ 1401 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, 1402 "6039 Xmit NVME LS response to remote " 1403 "NPORT x%x iotag:x%x oxid:x%x size:x%x\n", 1404 ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid, 1405 rspsize); 1406 return nvmewqe; 1407 1408 nvme_wqe_free_wqeq_exit: 1409 nvmewqe->context2 = NULL; 1410 nvmewqe->context3 = NULL; 1411 lpfc_sli_release_iocbq(phba, nvmewqe); 1412 return NULL; 1413 } 1414 1415 1416 static struct lpfc_iocbq * 1417 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, 1418 struct lpfc_nvmet_rcv_ctx *ctxp) 1419 { 1420 struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req; 1421 struct lpfc_nvmet_tgtport *tgtp; 1422 struct sli4_sge *sgl; 1423 struct lpfc_nodelist *ndlp; 1424 struct lpfc_iocbq *nvmewqe; 1425 struct scatterlist *sgel; 1426 union lpfc_wqe128 *wqe; 1427 uint32_t *txrdy; 1428 dma_addr_t physaddr; 1429 int i, cnt; 1430 int xc = 1; 1431 1432 if (!lpfc_is_link_up(phba)) { 1433 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1434 "6107 lpfc_nvmet_prep_fcp_wqe: link err:" 1435 "NPORT x%x oxid:x%x\n", ctxp->sid, 1436 ctxp->oxid); 1437 return NULL; 1438 } 1439 1440 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); 1441 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || 1442 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 1443 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { 1444 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1445 "6108 lpfc_nvmet_prep_fcp_wqe: no ndlp: " 1446 "NPORT x%x oxid:x%x\n", 1447 ctxp->sid, ctxp->oxid); 1448 return NULL; 1449 } 1450 1451 if (rsp->sg_cnt > phba->cfg_nvme_seg_cnt) { 1452 lpfc_printf_log(phba, KERN_ERR, 
LOG_NVME_IOERR, 1453 "6109 lpfc_nvmet_prep_fcp_wqe: seg cnt err: " 1454 "NPORT x%x oxid:x%x cnt %d\n", 1455 ctxp->sid, ctxp->oxid, phba->cfg_nvme_seg_cnt); 1456 return NULL; 1457 } 1458 1459 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 1460 nvmewqe = ctxp->wqeq; 1461 if (nvmewqe == NULL) { 1462 /* Allocate buffer for command wqe */ 1463 nvmewqe = ctxp->rqb_buffer->iocbq; 1464 if (nvmewqe == NULL) { 1465 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1466 "6110 lpfc_nvmet_prep_fcp_wqe: No " 1467 "WQE: NPORT x%x oxid:x%x\n", 1468 ctxp->sid, ctxp->oxid); 1469 return NULL; 1470 } 1471 ctxp->wqeq = nvmewqe; 1472 xc = 0; /* create new XRI */ 1473 nvmewqe->sli4_lxritag = NO_XRI; 1474 nvmewqe->sli4_xritag = NO_XRI; 1475 } 1476 1477 /* Sanity check */ 1478 if (((ctxp->state == LPFC_NVMET_STE_RCV) && 1479 (ctxp->entry_cnt == 1)) || 1480 ((ctxp->state == LPFC_NVMET_STE_DATA) && 1481 (ctxp->entry_cnt > 1))) { 1482 wqe = (union lpfc_wqe128 *)&nvmewqe->wqe; 1483 } else { 1484 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1485 "6111 Wrong state %s: %d cnt %d\n", 1486 __func__, ctxp->state, ctxp->entry_cnt); 1487 return NULL; 1488 } 1489 1490 sgl = (struct sli4_sge *)ctxp->rqb_buffer->sglq->sgl; 1491 switch (rsp->op) { 1492 case NVMET_FCOP_READDATA: 1493 case NVMET_FCOP_READDATA_RSP: 1494 /* Words 0 - 2 : The first sg segment */ 1495 sgel = &rsp->sg[0]; 1496 physaddr = sg_dma_address(sgel); 1497 wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1498 wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel); 1499 wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr)); 1500 wqe->fcp_tsend.bde.addrHigh = 1501 cpu_to_le32(putPaddrHigh(physaddr)); 1502 1503 /* Word 3 */ 1504 wqe->fcp_tsend.payload_offset_len = 0; 1505 1506 /* Word 4 */ 1507 wqe->fcp_tsend.relative_offset = ctxp->offset; 1508 1509 /* Word 5 */ 1510 1511 /* Word 6 */ 1512 bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com, 1513 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 1514 bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com, 1515 nvmewqe->sli4_xritag); 1516 1517 /* Word 7 */ 1518 bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE); 1519 1520 /* Word 8 */ 1521 wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag; 1522 1523 /* Word 9 */ 1524 bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag); 1525 bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid); 1526 1527 /* Word 10 */ 1528 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1); 1529 bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1); 1530 bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE); 1531 bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, 1532 LPFC_WQE_LENLOC_WORD12); 1533 bf_set(wqe_ebde_cnt, &wqe->fcp_tsend.wqe_com, 0); 1534 bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, xc); 1535 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1); 1536 if (phba->cfg_nvme_oas) 1537 bf_set(wqe_oas, &wqe->fcp_tsend.wqe_com, 1); 1538 1539 /* Word 11 */ 1540 bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, 1541 LPFC_WQE_CQ_ID_DEFAULT); 1542 bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, 1543 FCP_COMMAND_TSEND); 1544 1545 /* Word 12 */ 1546 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length; 1547 1548 /* Setup 2 SKIP SGEs */ 1549 sgl->addr_hi = 0; 1550 sgl->addr_lo = 0; 1551 sgl->word2 = 0; 1552 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); 1553 sgl->word2 = cpu_to_le32(sgl->word2); 1554 sgl->sge_len = 0; 1555 sgl++; 1556 sgl->addr_hi = 0; 1557 sgl->addr_lo = 0; 1558 sgl->word2 = 0; 1559 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); 1560 sgl->word2 = cpu_to_le32(sgl->word2); 1561 
sgl->sge_len = 0; 1562 sgl++; 1563 if (rsp->op == NVMET_FCOP_READDATA_RSP) { 1564 atomic_inc(&tgtp->xmt_fcp_read_rsp); 1565 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1); 1566 if ((ndlp->nlp_flag & NLP_SUPPRESS_RSP) && 1567 (rsp->rsplen == 12)) { 1568 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 1); 1569 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0); 1570 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0); 1571 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0); 1572 } else { 1573 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0); 1574 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1); 1575 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1); 1576 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 1577 ((rsp->rsplen >> 2) - 1)); 1578 memcpy(&wqe->words[16], rsp->rspaddr, 1579 rsp->rsplen); 1580 } 1581 } else { 1582 atomic_inc(&tgtp->xmt_fcp_read); 1583 1584 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0); 1585 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0); 1586 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0); 1587 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0); 1588 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0); 1589 } 1590 ctxp->state = LPFC_NVMET_STE_DATA; 1591 break; 1592 1593 case NVMET_FCOP_WRITEDATA: 1594 /* Words 0 - 2 : The first sg segment */ 1595 txrdy = pci_pool_alloc(phba->txrdy_payload_pool, 1596 GFP_KERNEL, &physaddr); 1597 if (!txrdy) { 1598 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1599 "6041 Bad txrdy buffer: oxid x%x\n", 1600 ctxp->oxid); 1601 return NULL; 1602 } 1603 ctxp->txrdy = txrdy; 1604 ctxp->txrdy_phys = physaddr; 1605 wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1606 wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN; 1607 wqe->fcp_treceive.bde.addrLow = 1608 cpu_to_le32(putPaddrLow(physaddr)); 1609 wqe->fcp_treceive.bde.addrHigh = 1610 cpu_to_le32(putPaddrHigh(physaddr)); 1611 1612 /* Word 3 */ 1613 wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN; 1614 1615 /* Word 4 */ 1616 wqe->fcp_treceive.relative_offset = ctxp->offset; 1617 1618 /* Word 5 */ 1619 1620 /* Word 6 */ 1621 bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com, 1622 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 1623 bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com, 1624 nvmewqe->sli4_xritag); 1625 1626 /* Word 7 */ 1627 bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0); 1628 bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, 1629 CMD_FCP_TRECEIVE64_WQE); 1630 1631 /* Word 8 */ 1632 wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag; 1633 1634 /* Word 9 */ 1635 bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag); 1636 bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid); 1637 1638 /* Word 10 */ 1639 bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1); 1640 bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1); 1641 bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ); 1642 bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, 1643 LPFC_WQE_LENLOC_WORD12); 1644 bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, xc); 1645 bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0); 1646 bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0); 1647 bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0); 1648 bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1); 1649 if (phba->cfg_nvme_oas) 1650 bf_set(wqe_oas, &wqe->fcp_treceive.wqe_com, 1); 1651 1652 /* Word 11 */ 1653 bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, 1654 LPFC_WQE_CQ_ID_DEFAULT); 1655 bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, 1656 FCP_COMMAND_TRECEIVE); 1657 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0); 1658 1659 /* Word 12 */ 1660 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length; 1661 1662 /* 
Setup 1 TXRDY and 1 SKIP SGE */ 1663 txrdy[0] = 0; 1664 txrdy[1] = cpu_to_be32(rsp->transfer_length); 1665 txrdy[2] = 0; 1666 1667 sgl->addr_hi = putPaddrHigh(physaddr); 1668 sgl->addr_lo = putPaddrLow(physaddr); 1669 sgl->word2 = 0; 1670 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); 1671 sgl->word2 = cpu_to_le32(sgl->word2); 1672 sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN); 1673 sgl++; 1674 sgl->addr_hi = 0; 1675 sgl->addr_lo = 0; 1676 sgl->word2 = 0; 1677 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); 1678 sgl->word2 = cpu_to_le32(sgl->word2); 1679 sgl->sge_len = 0; 1680 sgl++; 1681 ctxp->state = LPFC_NVMET_STE_DATA; 1682 atomic_inc(&tgtp->xmt_fcp_write); 1683 break; 1684 1685 case NVMET_FCOP_RSP: 1686 /* Words 0 - 2 */ 1687 physaddr = rsp->rspdma; 1688 wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1689 wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen; 1690 wqe->fcp_trsp.bde.addrLow = 1691 cpu_to_le32(putPaddrLow(physaddr)); 1692 wqe->fcp_trsp.bde.addrHigh = 1693 cpu_to_le32(putPaddrHigh(physaddr)); 1694 1695 /* Word 3 */ 1696 wqe->fcp_trsp.response_len = rsp->rsplen; 1697 1698 /* Word 4 */ 1699 wqe->fcp_trsp.rsvd_4_5[0] = 0; 1700 1701 1702 /* Word 5 */ 1703 1704 /* Word 6 */ 1705 bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com, 1706 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 1707 bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com, 1708 nvmewqe->sli4_xritag); 1709 1710 /* Word 7 */ 1711 bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); 1712 bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE); 1713 1714 /* Word 8 */ 1715 wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag; 1716 1717 /* Word 9 */ 1718 bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag); 1719 bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid); 1720 1721 /* Word 10 */ 1722 bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1); 1723 bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 0); 1724 bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_WRITE); 1725 bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, 1726 LPFC_WQE_LENLOC_WORD3); 1727 bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, xc); 1728 bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1); 1729 if (phba->cfg_nvme_oas) 1730 bf_set(wqe_oas, &wqe->fcp_trsp.wqe_com, 1); 1731 1732 /* Word 11 */ 1733 bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, 1734 LPFC_WQE_CQ_ID_DEFAULT); 1735 bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, 1736 FCP_COMMAND_TRSP); 1737 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0); 1738 ctxp->state = LPFC_NVMET_STE_RSP; 1739 1740 if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) { 1741 /* Good response - all zero's on wire */ 1742 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0); 1743 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0); 1744 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0); 1745 } else { 1746 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1); 1747 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1); 1748 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 1749 ((rsp->rsplen >> 2) - 1)); 1750 memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen); 1751 } 1752 1753 /* Use rspbuf, NOT sg list */ 1754 rsp->sg_cnt = 0; 1755 sgl->word2 = 0; 1756 atomic_inc(&tgtp->xmt_fcp_rsp); 1757 break; 1758 1759 default: 1760 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, 1761 "6064 Unknown Rsp Op %d\n", 1762 rsp->op); 1763 return NULL; 1764 } 1765 1766 nvmewqe->retry = 1; 1767 nvmewqe->vport = phba->pport; 1768 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT; 1769 nvmewqe->context1 = ndlp; 1770 1771 for (i = 0; i < rsp->sg_cnt; i++) { 1772 sgel = &rsp->sg[i]; 1773 physaddr = sg_dma_address(sgel); 1774 cnt = 
	for (i = 0; i < rsp->sg_cnt; i++) {
		sgel = &rsp->sg[i];
		physaddr = sg_dma_address(sgel);
		cnt = sg_dma_len(sgel);
		sgl->addr_hi = putPaddrHigh(physaddr);
		sgl->addr_lo = putPaddrLow(physaddr);
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
		bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
		if ((i + 1) == rsp->sg_cnt)
			bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(cnt);
		sgl++;
		ctxp->offset += cnt;
	}
	return nvmewqe;
}

/**
 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no lock held.
 * This function is the completion handler for an NVME ABTS issued for an FCP
 * command. It frees the memory resources used for the NVME command.
 **/
static void
lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, result;
	unsigned long flags;
	bool released = false;

	ctxp = cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	atomic_inc(&tgtp->xmt_abort_cmpl);

	ctxp->state = LPFC_NVMET_STE_DONE;

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
		list_del(&ctxp->list);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6165 ABORT cmpl: xri x%x flg x%x (%d) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	/*
	 * If the transport has released the ctx, it can be reused here.
	 * Otherwise it will be recycled by the transport release call.
	 */
	if (released)
		lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}

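/* Note: unlike the solicited-abort completion above, the unsolicited handler
 * below completes an abort carried on the exchange's own iocbq (flagged
 * LPFC_IO_NVMET), so that iocbq is not released here; only the receive
 * context is recycled when appropriate.
 */
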
/**
 * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no lock held.
 * This function is the completion handler for an NVME ABTS issued for an FCP
 * command. It frees the memory resources used for the NVME command.
 **/
static void
lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			       struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	unsigned long flags;
	uint32_t status, result;
	bool released = false;

	ctxp = cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	atomic_inc(&tgtp->xmt_abort_cmpl);

	if (!ctxp) {
		/* if the context is clear, the related io already completed */
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
				wcqe->word0, wcqe->total_data_placed,
				result, wcqe->word3);
		return;
	}

	/* Sanity check */
	if (ctxp->state != LPFC_NVMET_STE_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6112 ABTS Wrong state:%d oxid x%x\n",
				ctxp->state, ctxp->oxid);
	}

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	ctxp->state = LPFC_NVMET_STE_DONE;
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
		list_del(&ctxp->list);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6316 ABTS cmpl xri x%x flg x%x (%x) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);
	/*
	 * If the transport has released the ctx, it can be reused here.
	 * Otherwise it will be recycled by the transport release call.
	 */
	if (released)
		lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}

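/* The LS abort completion below releases the iocbq and frees the receive
 * context with kfree(); it does not recycle the context through
 * lpfc_nvmet_rq_post() the way the FCP abort completions above do.
 */
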
/**
 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no lock held.
 * This function is the completion handler for an NVME ABTS issued for an LS
 * command. It frees the memory resources used for the NVME command.
 **/
static void
lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			    struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, result;

	ctxp = cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	atomic_inc(&tgtp->xmt_abort_cmpl);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6083 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n",
			ctxp, wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	if (ctxp) {
		cmdwqe->context2 = NULL;
		cmdwqe->context3 = NULL;
		lpfc_sli_release_iocbq(phba, cmdwqe);
		kfree(ctxp);
	} else
		lpfc_sli_release_iocbq(phba, cmdwqe);
}

static int
lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
			     struct lpfc_nvmet_rcv_ctx *ctxp,
			     uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe *wqe_abts;
	struct lpfc_nodelist *ndlp;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6067 ABTS: sid %x xri x%x/x%x\n",
			sid, xri, ctxp->wqeq->sli4_xritag);

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->rqb_buffer->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
				"6134 Drop ABTS - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		return 0;
	}

	abts_wqeq = ctxp->wqeq;
	wqe_abts = &abts_wqeq->wqe;
	ctxp->state = LPFC_NVMET_STE_ABORT;

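	/* The abort for an unsolicited exchange goes on the wire as a BA_ABTS
	 * basic link service: the XMIT_SEQUENCE64 WQE built below carries
	 * R_CTL FC_RCTL_BA_ABTS and TYPE FC_TYPE_BLS, addressed to the remote
	 * port's RPI with the received exchange id placed in the rcvoxid
	 * field.
	 */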
	/*
	 * Since we zero the whole WQE, we need to ensure we set the WQE fields
	 * that were initialized in lpfc_sli4_nvmet_alloc.
	 */
	memset(wqe_abts, 0, sizeof(union lpfc_wqe));

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
	bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
	       abts_wqeq->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 8 */
	wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
	bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
	bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
	       OTHER_COMMAND);

	abts_wqeq->vport = phba->pport;
	abts_wqeq->context1 = ndlp;
	abts_wqeq->context2 = ctxp;
	abts_wqeq->context3 = NULL;
	abts_wqeq->rsvd2 = 0;
	/* hba_wqidx should already be setup from command we are aborting */
	abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
	abts_wqeq->iocb.ulpLe = 1;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6069 Issue ABTS to xri x%x reqtag x%x\n",
			xri, abts_wqeq->iotag);
	return 1;
}

static int
lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
			       struct lpfc_nvmet_rcv_ctx *ctxp,
			       uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe *abts_wqe;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->rqb_buffer->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
				"6160 Drop ABORT - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}

	/* Issue ABTS for this WQE based on iotag */
	ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
	if (!ctxp->abort_wqeq) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
				"6161 ABORT failed: No wqeqs: "
				"xri: x%x\n", ctxp->oxid);
		/* No failure to an ABTS request. */
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}
	abts_wqeq = ctxp->abort_wqeq;
	abts_wqe = &abts_wqeq->wqe;
	ctxp->state = LPFC_NVMET_STE_ABORT;

	/* Announce entry to new IO submit field. */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6162 ABORT Request to rport DID x%06x "
			"for xri x%x x%x\n",
			ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);

	/* If the hba is getting reset, this flag is set.  It is
	 * cleared when the reset is complete and rings reestablished.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
				"6163 Driver in reset cleanup - flushing "
				"NVME Req now. hba_flag x%x oxid x%x\n",
				phba->hba_flag, ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}

	/* Outstanding abort is in progress */
	if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
				"6164 Outstanding NVME I/O Abort Request "
				"still pending on oxid x%x\n",
				ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}

	/* Ready - mark outstanding as aborted by driver. */
	abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;

	/* WQEs are reused.  Clear stale data and set key fields to
	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
	 */
	memset(abts_wqe, 0, sizeof(union lpfc_wqe));

	/* word 3 */
	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);

	/* word 7 */
	bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);

	/* word 8 - tell the FW to abort the IO associated with this
	 * outstanding exchange ID.
	 */
	abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;

	/* word 9 - this is the iotag for the abts_wqe completion. */
	bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
	       abts_wqeq->iotag);

	/* word 10 */
	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);

	/* word 11 */
	bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
	bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = 0;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME;
	abts_wqeq->context2 = ctxp;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS)
		return 0;

	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6166 Failed ABORT issue_wqe with status x%x "
			"for oxid x%x.\n",
			rc, ctxp->oxid);
	return 1;
}

static int
lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
				 struct lpfc_nvmet_rcv_ctx *ctxp,
				 uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	unsigned long flags;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->rqb_buffer->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
	if (rc == 0)
		goto aerr;

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq = ctxp->wqeq;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_rsp);
		return 0;
	}

aerr:
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	atomic_inc(&tgtp->xmt_abort_rsp_error);
	lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
			"6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
			ctxp->oxid, rc);
	return 1;
}

static int
lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
				struct lpfc_nvmet_rcv_ctx *ctxp,
				uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe *wqe_abts;
	unsigned long flags;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		/* Issue ABTS for this WQE based on iotag */
		ctxp->wqeq = lpfc_sli_get_iocbq(phba);
		if (!ctxp->wqeq) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
					"6068 Abort failed: No wqeqs: "
					"xri: x%x\n", xri);
			/* No failure to an ABTS request. */
			kfree(ctxp);
			return 0;
		}
	}
	abts_wqeq = ctxp->wqeq;
	wqe_abts = &abts_wqeq->wqe;
	lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
	abts_wqeq->iocb_cmpl = 0;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_rsp);
		return 0;
	}

	atomic_inc(&tgtp->xmt_abort_rsp_error);
	abts_wqeq->context2 = NULL;
	abts_wqeq->context3 = NULL;
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	kfree(ctxp);
	lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
			"6056 Failed to Issue ABTS. Status x%x\n", rc);
	return 0;
}