/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for *
 * Fibre Channel Host Bus Adapters. *
 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
 * EMULEX and SLI are trademarks of Emulex. *
 * www.broadcom.com *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
 * *
 * This program is free software; you can redistribute it and/or *
 * modify it under the terms of version 2 of the GNU General *
 * Public License as published by the Free Software Foundation. *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for *
 * more details, a copy of which can be found in the file COPYING *
 * included with this package. *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <../drivers/nvme/host/nvme.h>
#include <linux/nvme-fc-driver.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
						 struct lpfc_nvmet_rcv_ctx *,
						 dma_addr_t rspbuf,
						 uint16_t rspsize);
static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
						  struct lpfc_nvmet_rcv_ctx *);
static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
					  struct lpfc_nvmet_rcv_ctx *,
					  uint32_t, uint16_t);
static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
					    struct lpfc_nvmet_rcv_ctx *,
					    uint32_t, uint16_t);
static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
					   struct lpfc_nvmet_rcv_ctx *,
					   uint32_t, uint16_t);

void
lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
{
	unsigned long iflag;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6313 NVMET Defer ctx release xri x%x flg x%x\n",
			ctxp->oxid, ctxp->flag);

	spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
	if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
				       iflag);
		return;
	}
	ctxp->flag |= LPFC_NVMET_CTX_RLS;
	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
	spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
}

/**
 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME LS commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_ls_req *rsp;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t status, result;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;
	ctxp = cmdwqe->context2;

	if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6410 NVMET LS cmpl state mismatch IO x%x: "
				"%d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	if (!phba->targetport)
		goto out;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	if (status)
		atomic_inc(&tgtp->xmt_ls_rsp_error);
	else
		atomic_inc(&tgtp->xmt_ls_rsp_cmpl);

out:
	rsp = &ctxp->ctx.ls_req;

	lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n",
			 ctxp->oxid, status, result);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
			status, result, ctxp->oxid);

	lpfc_nlp_put(cmdwqe->context1);
	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	rsp->done(rsp);
	kfree(ctxp);
}

/**
 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
 * @phba: HBA the context buffer is associated with
 * @ctx_buf: NVMET receive context buffer to repost
 *
 * Description: Returns the context buffer for reuse. If a received command
 * is waiting for a free context, the buffer is used to process that command
 * immediately; otherwise it is returned to the per-CPU context list for its
 * MRQ so it can be reused.
 *
 * Notes: Takes phba->sli4_hba.nvmet_io_wait_lock and, when reposting, the
 * per-CPU nvmet_ctx_list_lock. Can be called with or without other locks
 * held.
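 * Called, among other places, from lpfc_nvmet_xmt_fcp_release() on
 * normal IO completion, from lpfc_sli4_nvmet_xri_aborted() once a
 * deferred release becomes safe, and from receive-error paths that
 * must hand the context back.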
162 * 163 * Returns: None 164 **/ 165 void 166 lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) 167 { 168 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 169 struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context; 170 struct lpfc_nvmet_tgtport *tgtp; 171 struct fc_frame_header *fc_hdr; 172 struct rqb_dmabuf *nvmebuf; 173 struct lpfc_nvmet_ctx_info *infop; 174 uint32_t *payload; 175 uint32_t size, oxid, sid, rc; 176 int cpu; 177 unsigned long iflag; 178 179 if (ctxp->txrdy) { 180 dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy, 181 ctxp->txrdy_phys); 182 ctxp->txrdy = NULL; 183 ctxp->txrdy_phys = 0; 184 } 185 186 if (ctxp->state == LPFC_NVMET_STE_FREE) { 187 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 188 "6411 NVMET free, already free IO x%x: %d %d\n", 189 ctxp->oxid, ctxp->state, ctxp->entry_cnt); 190 } 191 ctxp->state = LPFC_NVMET_STE_FREE; 192 193 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag); 194 if (phba->sli4_hba.nvmet_io_wait_cnt) { 195 list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list, 196 nvmebuf, struct rqb_dmabuf, 197 hbuf.list); 198 phba->sli4_hba.nvmet_io_wait_cnt--; 199 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, 200 iflag); 201 202 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); 203 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 204 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 205 payload = (uint32_t *)(nvmebuf->dbuf.virt); 206 size = nvmebuf->bytes_recv; 207 sid = sli4_sid_from_fc_hdr(fc_hdr); 208 209 ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context; 210 ctxp->wqeq = NULL; 211 ctxp->txrdy = NULL; 212 ctxp->offset = 0; 213 ctxp->phba = phba; 214 ctxp->size = size; 215 ctxp->oxid = oxid; 216 ctxp->sid = sid; 217 ctxp->state = LPFC_NVMET_STE_RCV; 218 ctxp->entry_cnt = 1; 219 ctxp->flag = 0; 220 ctxp->ctxbuf = ctx_buf; 221 spin_lock_init(&ctxp->ctxlock); 222 223 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 224 if (ctxp->ts_cmd_nvme) { 225 ctxp->ts_cmd_nvme = ktime_get_ns(); 226 ctxp->ts_nvme_data = 0; 227 ctxp->ts_data_wqput = 0; 228 ctxp->ts_isr_data = 0; 229 ctxp->ts_data_nvme = 0; 230 ctxp->ts_nvme_status = 0; 231 ctxp->ts_status_wqput = 0; 232 ctxp->ts_isr_status = 0; 233 ctxp->ts_status_nvme = 0; 234 } 235 #endif 236 atomic_inc(&tgtp->rcv_fcp_cmd_in); 237 /* 238 * The calling sequence should be: 239 * nvmet_fc_rcv_fcp_req->lpfc_nvmet_xmt_fcp_op/cmp- req->done 240 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp. 241 * When we return from nvmet_fc_rcv_fcp_req, all relevant info 242 * the NVME command / FC header is stored. 243 * A buffer has already been reposted for this IO, so just free 244 * the nvmebuf. 
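		 * (Clarifying note: this is the deferred-IO case. The
		 * command was queued on lpfc_nvmet_io_wait_list earlier
		 * because no context was available, and it is resubmitted
		 * here with the context being freed, mirroring the normal
		 * receive path in lpfc_nvmet_unsol_fcp_buffer().)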
245 */ 246 rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req, 247 payload, size); 248 249 /* Process FCP command */ 250 if (rc == 0) { 251 atomic_inc(&tgtp->rcv_fcp_cmd_out); 252 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); 253 return; 254 } 255 256 atomic_inc(&tgtp->rcv_fcp_cmd_drop); 257 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 258 "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n", 259 ctxp->oxid, rc, 260 atomic_read(&tgtp->rcv_fcp_cmd_in), 261 atomic_read(&tgtp->rcv_fcp_cmd_out), 262 atomic_read(&tgtp->xmt_fcp_release)); 263 264 lpfc_nvmet_defer_release(phba, ctxp); 265 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid); 266 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); 267 return; 268 } 269 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag); 270 271 /* 272 * Use the CPU context list, from the MRQ the IO was received on 273 * (ctxp->idx), to save context structure. 274 */ 275 cpu = smp_processor_id(); 276 infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx); 277 spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag); 278 list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list); 279 infop->nvmet_ctx_list_cnt++; 280 spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag); 281 #endif 282 } 283 284 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 285 static void 286 lpfc_nvmet_ktime(struct lpfc_hba *phba, 287 struct lpfc_nvmet_rcv_ctx *ctxp) 288 { 289 uint64_t seg1, seg2, seg3, seg4, seg5; 290 uint64_t seg6, seg7, seg8, seg9, seg10; 291 uint64_t segsum; 292 293 if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme || 294 !ctxp->ts_nvme_data || !ctxp->ts_data_wqput || 295 !ctxp->ts_isr_data || !ctxp->ts_data_nvme || 296 !ctxp->ts_nvme_status || !ctxp->ts_status_wqput || 297 !ctxp->ts_isr_status || !ctxp->ts_status_nvme) 298 return; 299 300 if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd) 301 return; 302 if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme) 303 return; 304 if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data) 305 return; 306 if (ctxp->ts_nvme_data > ctxp->ts_data_wqput) 307 return; 308 if (ctxp->ts_data_wqput > ctxp->ts_isr_data) 309 return; 310 if (ctxp->ts_isr_data > ctxp->ts_data_nvme) 311 return; 312 if (ctxp->ts_data_nvme > ctxp->ts_nvme_status) 313 return; 314 if (ctxp->ts_nvme_status > ctxp->ts_status_wqput) 315 return; 316 if (ctxp->ts_status_wqput > ctxp->ts_isr_status) 317 return; 318 if (ctxp->ts_isr_status > ctxp->ts_status_nvme) 319 return; 320 /* 321 * Segment 1 - Time from FCP command received by MSI-X ISR 322 * to FCP command is passed to NVME Layer. 323 * Segment 2 - Time from FCP command payload handed 324 * off to NVME Layer to Driver receives a Command op 325 * from NVME Layer. 326 * Segment 3 - Time from Driver receives a Command op 327 * from NVME Layer to Command is put on WQ. 328 * Segment 4 - Time from Driver WQ put is done 329 * to MSI-X ISR for Command cmpl. 330 * Segment 5 - Time from MSI-X ISR for Command cmpl to 331 * Command cmpl is passed to NVME Layer. 332 * Segment 6 - Time from Command cmpl is passed to NVME 333 * Layer to Driver receives a RSP op from NVME Layer. 334 * Segment 7 - Time from Driver receives a RSP op from 335 * NVME Layer to WQ put is done on TRSP FCP Status. 336 * Segment 8 - Time from Driver WQ put is done on TRSP 337 * FCP Status to MSI-X ISR for TRSP cmpl. 338 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to 339 * TRSP cmpl is passed to NVME Layer. 340 * Segment 10 - Time from FCP command received by 341 * MSI-X ISR to command is completed on wire. 
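	 *
	 * (Worked example with illustrative timestamps only: if the ten
	 * stamps are 100, 130, 160, 200, 260, 300, 330, 360, 420 and
	 * 450 ns, seg1 thru seg9 are the successive deltas 30, 30, 40,
	 * 60, 40, 30, 30, 60 and 30, while seg10 = ts_isr_status -
	 * ts_isr_cmd = 320, i.e. the sum of segments 1 thru 8.)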
342 * (Segments 1 thru 8) for READDATA / WRITEDATA 343 * (Segments 1 thru 4) for READDATA_RSP 344 */ 345 seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd; 346 segsum = seg1; 347 348 seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd; 349 if (segsum > seg2) 350 return; 351 seg2 -= segsum; 352 segsum += seg2; 353 354 seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd; 355 if (segsum > seg3) 356 return; 357 seg3 -= segsum; 358 segsum += seg3; 359 360 seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd; 361 if (segsum > seg4) 362 return; 363 seg4 -= segsum; 364 segsum += seg4; 365 366 seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd; 367 if (segsum > seg5) 368 return; 369 seg5 -= segsum; 370 segsum += seg5; 371 372 373 /* For auto rsp commands seg6 thru seg10 will be 0 */ 374 if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) { 375 seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd; 376 if (segsum > seg6) 377 return; 378 seg6 -= segsum; 379 segsum += seg6; 380 381 seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd; 382 if (segsum > seg7) 383 return; 384 seg7 -= segsum; 385 segsum += seg7; 386 387 seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd; 388 if (segsum > seg8) 389 return; 390 seg8 -= segsum; 391 segsum += seg8; 392 393 seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd; 394 if (segsum > seg9) 395 return; 396 seg9 -= segsum; 397 segsum += seg9; 398 399 if (ctxp->ts_isr_status < ctxp->ts_isr_cmd) 400 return; 401 seg10 = (ctxp->ts_isr_status - 402 ctxp->ts_isr_cmd); 403 } else { 404 if (ctxp->ts_isr_data < ctxp->ts_isr_cmd) 405 return; 406 seg6 = 0; 407 seg7 = 0; 408 seg8 = 0; 409 seg9 = 0; 410 seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd); 411 } 412 413 phba->ktime_seg1_total += seg1; 414 if (seg1 < phba->ktime_seg1_min) 415 phba->ktime_seg1_min = seg1; 416 else if (seg1 > phba->ktime_seg1_max) 417 phba->ktime_seg1_max = seg1; 418 419 phba->ktime_seg2_total += seg2; 420 if (seg2 < phba->ktime_seg2_min) 421 phba->ktime_seg2_min = seg2; 422 else if (seg2 > phba->ktime_seg2_max) 423 phba->ktime_seg2_max = seg2; 424 425 phba->ktime_seg3_total += seg3; 426 if (seg3 < phba->ktime_seg3_min) 427 phba->ktime_seg3_min = seg3; 428 else if (seg3 > phba->ktime_seg3_max) 429 phba->ktime_seg3_max = seg3; 430 431 phba->ktime_seg4_total += seg4; 432 if (seg4 < phba->ktime_seg4_min) 433 phba->ktime_seg4_min = seg4; 434 else if (seg4 > phba->ktime_seg4_max) 435 phba->ktime_seg4_max = seg4; 436 437 phba->ktime_seg5_total += seg5; 438 if (seg5 < phba->ktime_seg5_min) 439 phba->ktime_seg5_min = seg5; 440 else if (seg5 > phba->ktime_seg5_max) 441 phba->ktime_seg5_max = seg5; 442 443 phba->ktime_data_samples++; 444 if (!seg6) 445 goto out; 446 447 phba->ktime_seg6_total += seg6; 448 if (seg6 < phba->ktime_seg6_min) 449 phba->ktime_seg6_min = seg6; 450 else if (seg6 > phba->ktime_seg6_max) 451 phba->ktime_seg6_max = seg6; 452 453 phba->ktime_seg7_total += seg7; 454 if (seg7 < phba->ktime_seg7_min) 455 phba->ktime_seg7_min = seg7; 456 else if (seg7 > phba->ktime_seg7_max) 457 phba->ktime_seg7_max = seg7; 458 459 phba->ktime_seg8_total += seg8; 460 if (seg8 < phba->ktime_seg8_min) 461 phba->ktime_seg8_min = seg8; 462 else if (seg8 > phba->ktime_seg8_max) 463 phba->ktime_seg8_max = seg8; 464 465 phba->ktime_seg9_total += seg9; 466 if (seg9 < phba->ktime_seg9_min) 467 phba->ktime_seg9_min = seg9; 468 else if (seg9 > phba->ktime_seg9_max) 469 phba->ktime_seg9_max = seg9; 470 out: 471 phba->ktime_seg10_total += seg10; 472 if (seg10 < phba->ktime_seg10_min) 473 phba->ktime_seg10_min = seg10; 474 else if (seg10 > phba->ktime_seg10_max) 475 phba->ktime_seg10_max = 
seg10; 476 phba->ktime_status_samples++; 477 } 478 #endif 479 480 /** 481 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response 482 * @phba: Pointer to HBA context object. 483 * @cmdwqe: Pointer to driver command WQE object. 484 * @wcqe: Pointer to driver response CQE object. 485 * 486 * The function is called from SLI ring event handler with no 487 * lock held. This function is the completion handler for NVME FCP commands 488 * The function frees memory resources used for the NVME commands. 489 **/ 490 static void 491 lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, 492 struct lpfc_wcqe_complete *wcqe) 493 { 494 struct lpfc_nvmet_tgtport *tgtp; 495 struct nvmefc_tgt_fcp_req *rsp; 496 struct lpfc_nvmet_rcv_ctx *ctxp; 497 uint32_t status, result, op, start_clean, logerr; 498 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 499 uint32_t id; 500 #endif 501 502 ctxp = cmdwqe->context2; 503 ctxp->flag &= ~LPFC_NVMET_IO_INP; 504 505 rsp = &ctxp->ctx.fcp_req; 506 op = rsp->op; 507 508 status = bf_get(lpfc_wcqe_c_status, wcqe); 509 result = wcqe->parameter; 510 511 if (phba->targetport) 512 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 513 else 514 tgtp = NULL; 515 516 lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n", 517 ctxp->oxid, op, status); 518 519 if (status) { 520 rsp->fcp_error = NVME_SC_DATA_XFER_ERROR; 521 rsp->transferred_length = 0; 522 if (tgtp) 523 atomic_inc(&tgtp->xmt_fcp_rsp_error); 524 525 logerr = LOG_NVME_IOERR; 526 527 /* pick up SLI4 exhange busy condition */ 528 if (bf_get(lpfc_wcqe_c_xb, wcqe)) { 529 ctxp->flag |= LPFC_NVMET_XBUSY; 530 logerr |= LOG_NVME_ABTS; 531 532 } else { 533 ctxp->flag &= ~LPFC_NVMET_XBUSY; 534 } 535 536 lpfc_printf_log(phba, KERN_INFO, logerr, 537 "6315 IO Error Cmpl xri x%x: %x/%x XBUSY:x%x\n", 538 ctxp->oxid, status, result, ctxp->flag); 539 540 } else { 541 rsp->fcp_error = NVME_SC_SUCCESS; 542 if (op == NVMET_FCOP_RSP) 543 rsp->transferred_length = rsp->rsplen; 544 else 545 rsp->transferred_length = rsp->transfer_length; 546 if (tgtp) 547 atomic_inc(&tgtp->xmt_fcp_rsp_cmpl); 548 } 549 550 if ((op == NVMET_FCOP_READDATA_RSP) || 551 (op == NVMET_FCOP_RSP)) { 552 /* Sanity check */ 553 ctxp->state = LPFC_NVMET_STE_DONE; 554 ctxp->entry_cnt++; 555 556 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 557 if (ctxp->ts_cmd_nvme) { 558 if (rsp->op == NVMET_FCOP_READDATA_RSP) { 559 ctxp->ts_isr_data = 560 cmdwqe->isr_timestamp; 561 ctxp->ts_data_nvme = 562 ktime_get_ns(); 563 ctxp->ts_nvme_status = 564 ctxp->ts_data_nvme; 565 ctxp->ts_status_wqput = 566 ctxp->ts_data_nvme; 567 ctxp->ts_isr_status = 568 ctxp->ts_data_nvme; 569 ctxp->ts_status_nvme = 570 ctxp->ts_data_nvme; 571 } else { 572 ctxp->ts_isr_status = 573 cmdwqe->isr_timestamp; 574 ctxp->ts_status_nvme = 575 ktime_get_ns(); 576 } 577 } 578 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { 579 id = smp_processor_id(); 580 if (ctxp->cpu != id) 581 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 582 "6703 CPU Check cmpl: " 583 "cpu %d expect %d\n", 584 id, ctxp->cpu); 585 if (ctxp->cpu < LPFC_CHECK_CPU_CNT) 586 phba->cpucheck_cmpl_io[id]++; 587 } 588 #endif 589 rsp->done(rsp); 590 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 591 if (ctxp->ts_cmd_nvme) 592 lpfc_nvmet_ktime(phba, ctxp); 593 #endif 594 /* lpfc_nvmet_xmt_fcp_release() will recycle the context */ 595 } else { 596 ctxp->entry_cnt++; 597 start_clean = offsetof(struct lpfc_iocbq, iocb_flag); 598 memset(((char *)cmdwqe) + start_clean, 0, 599 (sizeof(struct lpfc_iocbq) - start_clean)); 600 #ifdef 
CONFIG_SCSI_LPFC_DEBUG_FS 601 if (ctxp->ts_cmd_nvme) { 602 ctxp->ts_isr_data = cmdwqe->isr_timestamp; 603 ctxp->ts_data_nvme = ktime_get_ns(); 604 } 605 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { 606 id = smp_processor_id(); 607 if (ctxp->cpu != id) 608 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 609 "6704 CPU Check cmdcmpl: " 610 "cpu %d expect %d\n", 611 id, ctxp->cpu); 612 if (ctxp->cpu < LPFC_CHECK_CPU_CNT) 613 phba->cpucheck_ccmpl_io[id]++; 614 } 615 #endif 616 rsp->done(rsp); 617 } 618 } 619 620 static int 621 lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport, 622 struct nvmefc_tgt_ls_req *rsp) 623 { 624 struct lpfc_nvmet_rcv_ctx *ctxp = 625 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req); 626 struct lpfc_hba *phba = ctxp->phba; 627 struct hbq_dmabuf *nvmebuf = 628 (struct hbq_dmabuf *)ctxp->rqb_buffer; 629 struct lpfc_iocbq *nvmewqeq; 630 struct lpfc_nvmet_tgtport *nvmep = tgtport->private; 631 struct lpfc_dmabuf dmabuf; 632 struct ulp_bde64 bpl; 633 int rc; 634 635 if (phba->pport->load_flag & FC_UNLOADING) 636 return -ENODEV; 637 638 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, 639 "6023 NVMET LS rsp oxid x%x\n", ctxp->oxid); 640 641 if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) || 642 (ctxp->entry_cnt != 1)) { 643 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 644 "6412 NVMET LS rsp state mismatch " 645 "oxid x%x: %d %d\n", 646 ctxp->oxid, ctxp->state, ctxp->entry_cnt); 647 } 648 ctxp->state = LPFC_NVMET_STE_LS_RSP; 649 ctxp->entry_cnt++; 650 651 nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma, 652 rsp->rsplen); 653 if (nvmewqeq == NULL) { 654 atomic_inc(&nvmep->xmt_ls_drop); 655 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 656 "6150 LS Drop IO x%x: Prep\n", 657 ctxp->oxid); 658 lpfc_in_buf_free(phba, &nvmebuf->dbuf); 659 atomic_inc(&nvmep->xmt_ls_abort); 660 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, 661 ctxp->sid, ctxp->oxid); 662 return -ENOMEM; 663 } 664 665 /* Save numBdes for bpl2sgl */ 666 nvmewqeq->rsvd2 = 1; 667 nvmewqeq->hba_wqidx = 0; 668 nvmewqeq->context3 = &dmabuf; 669 dmabuf.virt = &bpl; 670 bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow; 671 bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh; 672 bpl.tus.f.bdeSize = rsp->rsplen; 673 bpl.tus.f.bdeFlags = 0; 674 bpl.tus.w = le32_to_cpu(bpl.tus.w); 675 676 nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp; 677 nvmewqeq->iocb_cmpl = NULL; 678 nvmewqeq->context2 = ctxp; 679 680 lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n", 681 ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen); 682 683 rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq); 684 if (rc == WQE_SUCCESS) { 685 /* 686 * Okay to repost buffer here, but wait till cmpl 687 * before freeing ctxp and iocbq. 
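		 * (Clarifying note: the received LS command buffer is no
		 * longer needed once the response has been built; the
		 * response payload itself lives at rsp->rspdma, which the
		 * nvmet transport owns, so only ctxp and the iocbq must
		 * survive until lpfc_nvmet_xmt_ls_rsp_cmp() runs.)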
688 */ 689 lpfc_in_buf_free(phba, &nvmebuf->dbuf); 690 ctxp->rqb_buffer = 0; 691 atomic_inc(&nvmep->xmt_ls_rsp); 692 return 0; 693 } 694 /* Give back resources */ 695 atomic_inc(&nvmep->xmt_ls_drop); 696 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 697 "6151 LS Drop IO x%x: Issue %d\n", 698 ctxp->oxid, rc); 699 700 lpfc_nlp_put(nvmewqeq->context1); 701 702 lpfc_in_buf_free(phba, &nvmebuf->dbuf); 703 atomic_inc(&nvmep->xmt_ls_abort); 704 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); 705 return -ENXIO; 706 } 707 708 static int 709 lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, 710 struct nvmefc_tgt_fcp_req *rsp) 711 { 712 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private; 713 struct lpfc_nvmet_rcv_ctx *ctxp = 714 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); 715 struct lpfc_hba *phba = ctxp->phba; 716 struct lpfc_iocbq *nvmewqeq; 717 int rc; 718 719 if (phba->pport->load_flag & FC_UNLOADING) { 720 rc = -ENODEV; 721 goto aerr; 722 } 723 724 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 725 if (ctxp->ts_cmd_nvme) { 726 if (rsp->op == NVMET_FCOP_RSP) 727 ctxp->ts_nvme_status = ktime_get_ns(); 728 else 729 ctxp->ts_nvme_data = ktime_get_ns(); 730 } 731 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { 732 int id = smp_processor_id(); 733 ctxp->cpu = id; 734 if (id < LPFC_CHECK_CPU_CNT) 735 phba->cpucheck_xmt_io[id]++; 736 if (rsp->hwqid != id) { 737 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 738 "6705 CPU Check OP: " 739 "cpu %d expect %d\n", 740 id, rsp->hwqid); 741 ctxp->cpu = rsp->hwqid; 742 } 743 } 744 #endif 745 746 /* Sanity check */ 747 if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) || 748 (ctxp->state == LPFC_NVMET_STE_ABORT)) { 749 atomic_inc(&lpfc_nvmep->xmt_fcp_drop); 750 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 751 "6102 IO xri x%x aborted\n", 752 ctxp->oxid); 753 rc = -ENXIO; 754 goto aerr; 755 } 756 757 nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp); 758 if (nvmewqeq == NULL) { 759 atomic_inc(&lpfc_nvmep->xmt_fcp_drop); 760 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 761 "6152 FCP Drop IO x%x: Prep\n", 762 ctxp->oxid); 763 rc = -ENXIO; 764 goto aerr; 765 } 766 767 nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp; 768 nvmewqeq->iocb_cmpl = NULL; 769 nvmewqeq->context2 = ctxp; 770 nvmewqeq->iocb_flag |= LPFC_IO_NVMET; 771 ctxp->wqeq->hba_wqidx = rsp->hwqid; 772 773 lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n", 774 ctxp->oxid, rsp->op, rsp->rsplen); 775 776 ctxp->flag |= LPFC_NVMET_IO_INP; 777 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq); 778 if (rc == WQE_SUCCESS) { 779 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 780 if (!ctxp->ts_cmd_nvme) 781 return 0; 782 if (rsp->op == NVMET_FCOP_RSP) 783 ctxp->ts_status_wqput = ktime_get_ns(); 784 else 785 ctxp->ts_data_wqput = ktime_get_ns(); 786 #endif 787 return 0; 788 } 789 790 /* Give back resources */ 791 atomic_inc(&lpfc_nvmep->xmt_fcp_drop); 792 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 793 "6153 FCP Drop IO x%x: Issue: %d\n", 794 ctxp->oxid, rc); 795 796 ctxp->wqeq->hba_wqidx = 0; 797 nvmewqeq->context2 = NULL; 798 nvmewqeq->context3 = NULL; 799 rc = -EBUSY; 800 aerr: 801 return rc; 802 } 803 804 static void 805 lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport) 806 { 807 struct lpfc_nvmet_tgtport *tport = targetport->private; 808 809 /* release any threads waiting for the unreg to complete */ 810 complete(&tport->tport_unreg_done); 811 } 812 813 static void 814 lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport, 815 struct 
nvmefc_tgt_fcp_req *req) 816 { 817 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private; 818 struct lpfc_nvmet_rcv_ctx *ctxp = 819 container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); 820 struct lpfc_hba *phba = ctxp->phba; 821 unsigned long flags; 822 823 if (phba->pport->load_flag & FC_UNLOADING) 824 return; 825 826 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 827 "6103 NVMET Abort op: oxri x%x flg x%x ste %d\n", 828 ctxp->oxid, ctxp->flag, ctxp->state); 829 830 lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n", 831 ctxp->oxid, ctxp->flag, ctxp->state); 832 833 atomic_inc(&lpfc_nvmep->xmt_fcp_abort); 834 835 spin_lock_irqsave(&ctxp->ctxlock, flags); 836 ctxp->state = LPFC_NVMET_STE_ABORT; 837 838 /* Since iaab/iaar are NOT set, we need to check 839 * if the firmware is in process of aborting IO 840 */ 841 if (ctxp->flag & LPFC_NVMET_XBUSY) { 842 spin_unlock_irqrestore(&ctxp->ctxlock, flags); 843 return; 844 } 845 ctxp->flag |= LPFC_NVMET_ABORT_OP; 846 847 /* An state of LPFC_NVMET_STE_RCV means we have just received 848 * the NVME command and have not started processing it. 849 * (by issuing any IO WQEs on this exchange yet) 850 */ 851 if (ctxp->state == LPFC_NVMET_STE_RCV) 852 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, 853 ctxp->oxid); 854 else 855 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid, 856 ctxp->oxid); 857 spin_unlock_irqrestore(&ctxp->ctxlock, flags); 858 } 859 860 static void 861 lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport, 862 struct nvmefc_tgt_fcp_req *rsp) 863 { 864 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private; 865 struct lpfc_nvmet_rcv_ctx *ctxp = 866 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); 867 struct lpfc_hba *phba = ctxp->phba; 868 unsigned long flags; 869 bool aborting = false; 870 871 if (ctxp->state != LPFC_NVMET_STE_DONE && 872 ctxp->state != LPFC_NVMET_STE_ABORT) { 873 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 874 "6413 NVMET release bad state %d %d oxid x%x\n", 875 ctxp->state, ctxp->entry_cnt, ctxp->oxid); 876 } 877 878 spin_lock_irqsave(&ctxp->ctxlock, flags); 879 if ((ctxp->flag & LPFC_NVMET_ABORT_OP) || 880 (ctxp->flag & LPFC_NVMET_XBUSY)) { 881 aborting = true; 882 /* let the abort path do the real release */ 883 lpfc_nvmet_defer_release(phba, ctxp); 884 } 885 spin_unlock_irqrestore(&ctxp->ctxlock, flags); 886 887 lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid, 888 ctxp->state, aborting); 889 890 atomic_inc(&lpfc_nvmep->xmt_fcp_release); 891 892 if (aborting) 893 return; 894 895 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); 896 } 897 898 static void 899 lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport, 900 struct nvmefc_tgt_fcp_req *rsp) 901 { 902 struct lpfc_nvmet_tgtport *tgtp; 903 struct lpfc_nvmet_rcv_ctx *ctxp = 904 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); 905 struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer; 906 struct lpfc_hba *phba = ctxp->phba; 907 908 lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n", 909 ctxp->oxid, ctxp->size, smp_processor_id()); 910 911 tgtp = phba->targetport->private; 912 atomic_inc(&tgtp->rcv_fcp_cmd_defer); 913 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ 914 } 915 916 static struct nvmet_fc_target_template lpfc_tgttemplate = { 917 .targetport_delete = lpfc_nvmet_targetport_delete, 918 .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp, 919 .fcp_op = lpfc_nvmet_xmt_fcp_op, 920 .fcp_abort = lpfc_nvmet_xmt_fcp_abort, 921 .fcp_req_release = 
lpfc_nvmet_xmt_fcp_release,
	.defer_rcv      = lpfc_nvmet_defer_rcv,

	.max_hw_queues  = 1,
	.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.dma_boundary = 0xFFFFFFFF,

	/* optional features */
	.target_features = 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
};

static void
__lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
			      struct lpfc_nvmet_ctx_info *infop)
{
	struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
	unsigned long flags;

	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
				 &infop->nvmet_ctx_list, list) {
		spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
		list_del_init(&ctx_buf->list);
		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);

		__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
		ctx_buf->sglq->state = SGL_FREED;
		ctx_buf->sglq->ndlp = NULL;

		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_add_tail(&ctx_buf->sglq->list,
			      &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);

		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
		kfree(ctx_buf->context);
	}
	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
}

static void
lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctx_info *infop;
	int i, j;

	/* The first context list, MRQ 0 CPU 0 */
	infop = phba->sli4_hba.nvmet_ctx_info;
	if (!infop)
		return;

	/* Cycle the entire CPU context list for every MRQ */
	for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
		for (j = 0; j < phba->sli4_hba.num_present_cpu; j++) {
			__lpfc_nvmet_clean_io_for_cpu(phba, infop);
			infop++; /* next */
		}
	}
	kfree(phba->sli4_hba.nvmet_ctx_info);
	phba->sli4_hba.nvmet_ctx_info = NULL;
}

static int
lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctxbuf *ctx_buf;
	struct lpfc_iocbq *nvmewqe;
	union lpfc_wqe128 *wqe;
	struct lpfc_nvmet_ctx_info *last_infop;
	struct lpfc_nvmet_ctx_info *infop;
	int i, j, idx;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6403 Allocate NVMET resources for %d XRIs\n",
			phba->sli4_hba.nvmet_xri_cnt);

	phba->sli4_hba.nvmet_ctx_info = kcalloc(
		phba->sli4_hba.num_present_cpu * phba->cfg_nvmet_mrq,
		sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
	if (!phba->sli4_hba.nvmet_ctx_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6419 Failed allocate memory for "
				"nvmet context lists\n");
		return -ENOMEM;
	}

	/*
	 * Assuming X CPUs in the system, and Y MRQs, allocate some
	 * lpfc_nvmet_ctx_info structures as follows:
	 *
	 * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
	 * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
	 * ...
	 * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
	 *
	 * Each line represents an MRQ "silo" containing an entry for
	 * every CPU.
	 *
	 * MRQ X is initially assumed to be associated with CPU X, thus
	 * contexts are initially distributed across all MRQs using
	 * the MRQ index (N) as follows: cpuN/mrqN. When contexts are
	 * freed, they are freed to the MRQ silo based on the CPU number
	 * of the IO completion. Thus a context that was allocated for MRQ A
	 * whose IO completed on CPU B will be freed to cpuB/mrqA.
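	 *
	 * (Worked example, illustrative only: with 4 CPUs and 2 MRQs the
	 * kcalloc above holds 4 * 2 = 8 lpfc_nvmet_ctx_info entries laid
	 * out cpu-major, so the entry for a given cpu/mrq pair sits at
	 * index (cpu * phba->cfg_nvmet_mrq) + mrq, which is what
	 * lpfc_get_ctx_list() is expected to resolve to. A context seeded
	 * on cpu1/mrq1 whose IO later completes on CPU 3 is returned to
	 * cpu3/mrq1 by lpfc_nvmet_ctxbuf_post().)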
1028 */ 1029 infop = phba->sli4_hba.nvmet_ctx_info; 1030 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 1031 for (j = 0; j < phba->cfg_nvmet_mrq; j++) { 1032 INIT_LIST_HEAD(&infop->nvmet_ctx_list); 1033 spin_lock_init(&infop->nvmet_ctx_list_lock); 1034 infop->nvmet_ctx_list_cnt = 0; 1035 infop++; 1036 } 1037 } 1038 1039 /* 1040 * Setup the next CPU context info ptr for each MRQ. 1041 * MRQ 0 will cycle thru CPUs 0 - X separately from 1042 * MRQ 1 cycling thru CPUs 0 - X, and so on. 1043 */ 1044 for (j = 0; j < phba->cfg_nvmet_mrq; j++) { 1045 last_infop = lpfc_get_ctx_list(phba, 0, j); 1046 for (i = phba->sli4_hba.num_present_cpu - 1; i >= 0; i--) { 1047 infop = lpfc_get_ctx_list(phba, i, j); 1048 infop->nvmet_ctx_next_cpu = last_infop; 1049 last_infop = infop; 1050 } 1051 } 1052 1053 /* For all nvmet xris, allocate resources needed to process a 1054 * received command on a per xri basis. 1055 */ 1056 idx = 0; 1057 for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) { 1058 ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL); 1059 if (!ctx_buf) { 1060 lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 1061 "6404 Ran out of memory for NVMET\n"); 1062 return -ENOMEM; 1063 } 1064 1065 ctx_buf->context = kzalloc(sizeof(*ctx_buf->context), 1066 GFP_KERNEL); 1067 if (!ctx_buf->context) { 1068 kfree(ctx_buf); 1069 lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 1070 "6405 Ran out of NVMET " 1071 "context memory\n"); 1072 return -ENOMEM; 1073 } 1074 ctx_buf->context->ctxbuf = ctx_buf; 1075 ctx_buf->context->state = LPFC_NVMET_STE_FREE; 1076 1077 ctx_buf->iocbq = lpfc_sli_get_iocbq(phba); 1078 if (!ctx_buf->iocbq) { 1079 kfree(ctx_buf->context); 1080 kfree(ctx_buf); 1081 lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 1082 "6406 Ran out of NVMET iocb/WQEs\n"); 1083 return -ENOMEM; 1084 } 1085 ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET; 1086 nvmewqe = ctx_buf->iocbq; 1087 wqe = (union lpfc_wqe128 *)&nvmewqe->wqe; 1088 /* Initialize WQE */ 1089 memset(wqe, 0, sizeof(union lpfc_wqe)); 1090 /* Word 7 */ 1091 bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI); 1092 bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3); 1093 /* Word 10 */ 1094 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1); 1095 bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0); 1096 bf_set(wqe_qosd, &wqe->generic.wqe_com, 0); 1097 1098 ctx_buf->iocbq->context1 = NULL; 1099 spin_lock(&phba->sli4_hba.sgl_list_lock); 1100 ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq); 1101 spin_unlock(&phba->sli4_hba.sgl_list_lock); 1102 if (!ctx_buf->sglq) { 1103 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq); 1104 kfree(ctx_buf->context); 1105 kfree(ctx_buf); 1106 lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 1107 "6407 Ran out of NVMET XRIs\n"); 1108 return -ENOMEM; 1109 } 1110 1111 /* 1112 * Add ctx to MRQidx context list. Our initial assumption 1113 * is MRQidx will be associated with CPUidx. This association 1114 * can change on the fly. 
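		 * (Hence lpfc_get_ctx_list(phba, idx, idx) below: each
		 * context is seeded on the cpuN/mrqN diagonal described in
		 * the layout comment above, and idx wraps modulo
		 * cfg_nvmet_mrq so contexts are spread evenly across MRQs.)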
1115 */ 1116 infop = lpfc_get_ctx_list(phba, idx, idx); 1117 spin_lock(&infop->nvmet_ctx_list_lock); 1118 list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list); 1119 infop->nvmet_ctx_list_cnt++; 1120 spin_unlock(&infop->nvmet_ctx_list_lock); 1121 1122 /* Spread ctx structures evenly across all MRQs */ 1123 idx++; 1124 if (idx >= phba->cfg_nvmet_mrq) 1125 idx = 0; 1126 } 1127 1128 infop = phba->sli4_hba.nvmet_ctx_info; 1129 for (j = 0; j < phba->cfg_nvmet_mrq; j++) { 1130 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 1131 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, 1132 "6408 TOTAL NVMET ctx for CPU %d " 1133 "MRQ %d: cnt %d nextcpu %p\n", 1134 i, j, infop->nvmet_ctx_list_cnt, 1135 infop->nvmet_ctx_next_cpu); 1136 infop++; 1137 } 1138 } 1139 return 0; 1140 } 1141 1142 int 1143 lpfc_nvmet_create_targetport(struct lpfc_hba *phba) 1144 { 1145 struct lpfc_vport *vport = phba->pport; 1146 struct lpfc_nvmet_tgtport *tgtp; 1147 struct nvmet_fc_port_info pinfo; 1148 int error; 1149 1150 if (phba->targetport) 1151 return 0; 1152 1153 error = lpfc_nvmet_setup_io_context(phba); 1154 if (error) 1155 return error; 1156 1157 memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info)); 1158 pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn); 1159 pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn); 1160 pinfo.port_id = vport->fc_myDID; 1161 1162 /* Limit to LPFC_MAX_NVME_SEG_CNT. 1163 * For now need + 1 to get around NVME transport logic. 1164 */ 1165 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) { 1166 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, 1167 "6400 Reducing sg segment cnt to %d\n", 1168 LPFC_MAX_NVME_SEG_CNT); 1169 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 1170 } else { 1171 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; 1172 } 1173 lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; 1174 lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel; 1175 lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP; 1176 1177 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 1178 error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate, 1179 &phba->pcidev->dev, 1180 &phba->targetport); 1181 #else 1182 error = -ENOENT; 1183 #endif 1184 if (error) { 1185 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, 1186 "6025 Cannot register NVME targetport x%x: " 1187 "portnm %llx nodenm %llx segs %d qs %d\n", 1188 error, 1189 pinfo.port_name, pinfo.node_name, 1190 lpfc_tgttemplate.max_sgl_segments, 1191 lpfc_tgttemplate.max_hw_queues); 1192 phba->targetport = NULL; 1193 phba->nvmet_support = 0; 1194 1195 lpfc_nvmet_cleanup_io_context(phba); 1196 1197 } else { 1198 tgtp = (struct lpfc_nvmet_tgtport *) 1199 phba->targetport->private; 1200 tgtp->phba = phba; 1201 1202 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, 1203 "6026 Registered NVME " 1204 "targetport: %p, private %p " 1205 "portnm %llx nodenm %llx segs %d qs %d\n", 1206 phba->targetport, tgtp, 1207 pinfo.port_name, pinfo.node_name, 1208 lpfc_tgttemplate.max_sgl_segments, 1209 lpfc_tgttemplate.max_hw_queues); 1210 1211 atomic_set(&tgtp->rcv_ls_req_in, 0); 1212 atomic_set(&tgtp->rcv_ls_req_out, 0); 1213 atomic_set(&tgtp->rcv_ls_req_drop, 0); 1214 atomic_set(&tgtp->xmt_ls_abort, 0); 1215 atomic_set(&tgtp->xmt_ls_abort_cmpl, 0); 1216 atomic_set(&tgtp->xmt_ls_rsp, 0); 1217 atomic_set(&tgtp->xmt_ls_drop, 0); 1218 atomic_set(&tgtp->xmt_ls_rsp_error, 0); 1219 atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0); 1220 atomic_set(&tgtp->rcv_fcp_cmd_in, 0); 1221 atomic_set(&tgtp->rcv_fcp_cmd_out, 0); 1222 
atomic_set(&tgtp->rcv_fcp_cmd_drop, 0); 1223 atomic_set(&tgtp->xmt_fcp_drop, 0); 1224 atomic_set(&tgtp->xmt_fcp_read_rsp, 0); 1225 atomic_set(&tgtp->xmt_fcp_read, 0); 1226 atomic_set(&tgtp->xmt_fcp_write, 0); 1227 atomic_set(&tgtp->xmt_fcp_rsp, 0); 1228 atomic_set(&tgtp->xmt_fcp_release, 0); 1229 atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0); 1230 atomic_set(&tgtp->xmt_fcp_rsp_error, 0); 1231 atomic_set(&tgtp->xmt_fcp_rsp_drop, 0); 1232 atomic_set(&tgtp->xmt_fcp_abort, 0); 1233 atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0); 1234 atomic_set(&tgtp->xmt_abort_unsol, 0); 1235 atomic_set(&tgtp->xmt_abort_sol, 0); 1236 atomic_set(&tgtp->xmt_abort_rsp, 0); 1237 atomic_set(&tgtp->xmt_abort_rsp_error, 0); 1238 } 1239 return error; 1240 } 1241 1242 int 1243 lpfc_nvmet_update_targetport(struct lpfc_hba *phba) 1244 { 1245 struct lpfc_vport *vport = phba->pport; 1246 1247 if (!phba->targetport) 1248 return 0; 1249 1250 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, 1251 "6007 Update NVMET port %p did x%x\n", 1252 phba->targetport, vport->fc_myDID); 1253 1254 phba->targetport->port_id = vport->fc_myDID; 1255 return 0; 1256 } 1257 1258 /** 1259 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort 1260 * @phba: pointer to lpfc hba data structure. 1261 * @axri: pointer to the nvmet xri abort wcqe structure. 1262 * 1263 * This routine is invoked by the worker thread to process a SLI4 fast-path 1264 * NVMET aborted xri. 1265 **/ 1266 void 1267 lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, 1268 struct sli4_wcqe_xri_aborted *axri) 1269 { 1270 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 1271 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); 1272 struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp; 1273 struct lpfc_nodelist *ndlp; 1274 unsigned long iflag = 0; 1275 int rrq_empty = 0; 1276 bool released = false; 1277 1278 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 1279 "6317 XB aborted xri x%x rxid x%x\n", xri, rxid); 1280 1281 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) 1282 return; 1283 spin_lock_irqsave(&phba->hbalock, iflag); 1284 spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); 1285 list_for_each_entry_safe(ctxp, next_ctxp, 1286 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, 1287 list) { 1288 if (ctxp->ctxbuf->sglq->sli4_xritag != xri) 1289 continue; 1290 1291 /* Check if we already received a free context call 1292 * and we have completed processing an abort situation. 
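		 * (That is, LPFC_NVMET_CTX_RLS was set by
		 * lpfc_nvmet_defer_release() when the transport released the
		 * IO while the exchange was still busy; now that the XRI
		 * abort has completed, this path performs the final
		 * lpfc_nvmet_ctxbuf_post() for the context.)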
1293 */ 1294 if (ctxp->flag & LPFC_NVMET_CTX_RLS && 1295 !(ctxp->flag & LPFC_NVMET_ABORT_OP)) { 1296 list_del(&ctxp->list); 1297 released = true; 1298 } 1299 ctxp->flag &= ~LPFC_NVMET_XBUSY; 1300 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); 1301 1302 rrq_empty = list_empty(&phba->active_rrq_list); 1303 spin_unlock_irqrestore(&phba->hbalock, iflag); 1304 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); 1305 if (ndlp && NLP_CHK_NODE_ACT(ndlp) && 1306 (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE || 1307 ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { 1308 lpfc_set_rrq_active(phba, ndlp, 1309 ctxp->ctxbuf->sglq->sli4_lxritag, 1310 rxid, 1); 1311 lpfc_sli4_abts_err_handler(phba, ndlp, axri); 1312 } 1313 1314 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 1315 "6318 XB aborted oxid %x flg x%x (%x)\n", 1316 ctxp->oxid, ctxp->flag, released); 1317 if (released) 1318 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); 1319 1320 if (rrq_empty) 1321 lpfc_worker_wake_up(phba); 1322 return; 1323 } 1324 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); 1325 spin_unlock_irqrestore(&phba->hbalock, iflag); 1326 } 1327 1328 int 1329 lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, 1330 struct fc_frame_header *fc_hdr) 1331 1332 { 1333 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 1334 struct lpfc_hba *phba = vport->phba; 1335 struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp; 1336 struct nvmefc_tgt_fcp_req *rsp; 1337 uint16_t xri; 1338 unsigned long iflag = 0; 1339 1340 xri = be16_to_cpu(fc_hdr->fh_ox_id); 1341 1342 spin_lock_irqsave(&phba->hbalock, iflag); 1343 spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); 1344 list_for_each_entry_safe(ctxp, next_ctxp, 1345 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, 1346 list) { 1347 if (ctxp->ctxbuf->sglq->sli4_xritag != xri) 1348 continue; 1349 1350 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); 1351 spin_unlock_irqrestore(&phba->hbalock, iflag); 1352 1353 spin_lock_irqsave(&ctxp->ctxlock, iflag); 1354 ctxp->flag |= LPFC_NVMET_ABTS_RCV; 1355 spin_unlock_irqrestore(&ctxp->ctxlock, iflag); 1356 1357 lpfc_nvmeio_data(phba, 1358 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n", 1359 xri, smp_processor_id(), 0); 1360 1361 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 1362 "6319 NVMET Rcv ABTS:acc xri x%x\n", xri); 1363 1364 rsp = &ctxp->ctx.fcp_req; 1365 nvmet_fc_rcv_fcp_abort(phba->targetport, rsp); 1366 1367 /* Respond with BA_ACC accordingly */ 1368 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1); 1369 return 0; 1370 } 1371 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); 1372 spin_unlock_irqrestore(&phba->hbalock, iflag); 1373 1374 lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n", 1375 xri, smp_processor_id(), 1); 1376 1377 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 1378 "6320 NVMET Rcv ABTS:rjt xri x%x\n", xri); 1379 1380 /* Respond with BA_RJT accordingly */ 1381 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0); 1382 #endif 1383 return 0; 1384 } 1385 1386 void 1387 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) 1388 { 1389 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 1390 struct lpfc_nvmet_tgtport *tgtp; 1391 1392 if (phba->nvmet_support == 0) 1393 return; 1394 if (phba->targetport) { 1395 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 1396 init_completion(&tgtp->tport_unreg_done); 1397 nvmet_fc_unregister_targetport(phba->targetport); 1398 wait_for_completion_timeout(&tgtp->tport_unreg_done, 5); 1399 lpfc_nvmet_cleanup_io_context(phba); 1400 } 1401 phba->targetport = NULL; 1402 #endif 1403 } 1404 1405 /** 1406 * lpfc_nvmet_unsol_ls_buffer 
- Process an unsolicited event data buffer 1407 * @phba: pointer to lpfc hba data structure. 1408 * @pring: pointer to a SLI ring. 1409 * @nvmebuf: pointer to lpfc nvme command HBQ data structure. 1410 * 1411 * This routine is used for processing the WQE associated with a unsolicited 1412 * event. It first determines whether there is an existing ndlp that matches 1413 * the DID from the unsolicited WQE. If not, it will create a new one with 1414 * the DID from the unsolicited WQE. The ELS command from the unsolicited 1415 * WQE is then used to invoke the proper routine and to set up proper state 1416 * of the discovery state machine. 1417 **/ 1418 static void 1419 lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1420 struct hbq_dmabuf *nvmebuf) 1421 { 1422 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 1423 struct lpfc_nvmet_tgtport *tgtp; 1424 struct fc_frame_header *fc_hdr; 1425 struct lpfc_nvmet_rcv_ctx *ctxp; 1426 uint32_t *payload; 1427 uint32_t size, oxid, sid, rc; 1428 1429 if (!nvmebuf || !phba->targetport) { 1430 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1431 "6154 LS Drop IO\n"); 1432 oxid = 0; 1433 size = 0; 1434 sid = 0; 1435 ctxp = NULL; 1436 goto dropit; 1437 } 1438 1439 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 1440 payload = (uint32_t *)(nvmebuf->dbuf.virt); 1441 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); 1442 size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl); 1443 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 1444 sid = sli4_sid_from_fc_hdr(fc_hdr); 1445 1446 ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC); 1447 if (ctxp == NULL) { 1448 atomic_inc(&tgtp->rcv_ls_req_drop); 1449 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1450 "6155 LS Drop IO x%x: Alloc\n", 1451 oxid); 1452 dropit: 1453 lpfc_nvmeio_data(phba, "NVMET LS DROP: " 1454 "xri x%x sz %d from %06x\n", 1455 oxid, size, sid); 1456 if (nvmebuf) 1457 lpfc_in_buf_free(phba, &nvmebuf->dbuf); 1458 return; 1459 } 1460 ctxp->phba = phba; 1461 ctxp->size = size; 1462 ctxp->oxid = oxid; 1463 ctxp->sid = sid; 1464 ctxp->wqeq = NULL; 1465 ctxp->state = LPFC_NVMET_STE_LS_RCV; 1466 ctxp->entry_cnt = 1; 1467 ctxp->rqb_buffer = (void *)nvmebuf; 1468 1469 lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n", 1470 oxid, size, sid); 1471 /* 1472 * The calling sequence should be: 1473 * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp ->_req->done 1474 * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp. 
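	 * (If nvmet_fc_rcv_ls_req() fails instead, the error path below
	 * frees the receive buffer, counts the drop and aborts the
	 * exchange, since no LS response will be generated for it.)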
1475 */ 1476 atomic_inc(&tgtp->rcv_ls_req_in); 1477 rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req, 1478 payload, size); 1479 1480 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, 1481 "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x " 1482 "%08x %08x %08x\n", size, rc, 1483 *payload, *(payload+1), *(payload+2), 1484 *(payload+3), *(payload+4), *(payload+5)); 1485 1486 if (rc == 0) { 1487 atomic_inc(&tgtp->rcv_ls_req_out); 1488 return; 1489 } 1490 1491 lpfc_nvmeio_data(phba, "NVMET LS DROP: xri x%x sz %d from %06x\n", 1492 oxid, size, sid); 1493 1494 atomic_inc(&tgtp->rcv_ls_req_drop); 1495 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1496 "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n", 1497 ctxp->oxid, rc); 1498 1499 /* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */ 1500 if (nvmebuf) 1501 lpfc_in_buf_free(phba, &nvmebuf->dbuf); 1502 1503 atomic_inc(&tgtp->xmt_ls_abort); 1504 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid); 1505 #endif 1506 } 1507 1508 static struct lpfc_nvmet_ctxbuf * 1509 lpfc_nvmet_replenish_context(struct lpfc_hba *phba, 1510 struct lpfc_nvmet_ctx_info *current_infop) 1511 { 1512 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 1513 struct lpfc_nvmet_ctxbuf *ctx_buf = NULL; 1514 struct lpfc_nvmet_ctx_info *get_infop; 1515 int i; 1516 1517 /* 1518 * The current_infop for the MRQ a NVME command IU was received 1519 * on is empty. Our goal is to replenish this MRQs context 1520 * list from a another CPUs. 1521 * 1522 * First we need to pick a context list to start looking on. 1523 * nvmet_ctx_start_cpu has available context the last time 1524 * we needed to replenish this CPU where nvmet_ctx_next_cpu 1525 * is just the next sequential CPU for this MRQ. 1526 */ 1527 if (current_infop->nvmet_ctx_start_cpu) 1528 get_infop = current_infop->nvmet_ctx_start_cpu; 1529 else 1530 get_infop = current_infop->nvmet_ctx_next_cpu; 1531 1532 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 1533 if (get_infop == current_infop) { 1534 get_infop = get_infop->nvmet_ctx_next_cpu; 1535 continue; 1536 } 1537 spin_lock(&get_infop->nvmet_ctx_list_lock); 1538 1539 /* Just take the entire context list, if there are any */ 1540 if (get_infop->nvmet_ctx_list_cnt) { 1541 list_splice_init(&get_infop->nvmet_ctx_list, 1542 ¤t_infop->nvmet_ctx_list); 1543 current_infop->nvmet_ctx_list_cnt = 1544 get_infop->nvmet_ctx_list_cnt - 1; 1545 get_infop->nvmet_ctx_list_cnt = 0; 1546 spin_unlock(&get_infop->nvmet_ctx_list_lock); 1547 1548 current_infop->nvmet_ctx_start_cpu = get_infop; 1549 list_remove_head(¤t_infop->nvmet_ctx_list, 1550 ctx_buf, struct lpfc_nvmet_ctxbuf, 1551 list); 1552 return ctx_buf; 1553 } 1554 1555 /* Otherwise, move on to the next CPU for this MRQ */ 1556 spin_unlock(&get_infop->nvmet_ctx_list_lock); 1557 get_infop = get_infop->nvmet_ctx_next_cpu; 1558 } 1559 1560 #endif 1561 /* Nothing found, all contexts for the MRQ are in-flight */ 1562 return NULL; 1563 } 1564 1565 /** 1566 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer 1567 * @phba: pointer to lpfc hba data structure. 1568 * @idx: relative index of MRQ vector 1569 * @nvmebuf: pointer to lpfc nvme command HBQ data structure. 1570 * 1571 * This routine is used for processing the WQE associated with a unsolicited 1572 * event. It first determines whether there is an existing ndlp that matches 1573 * the DID from the unsolicited WQE. If not, it will create a new one with 1574 * the DID from the unsolicited WQE. 
The ELS command from the unsolicited 1575 * WQE is then used to invoke the proper routine and to set up proper state 1576 * of the discovery state machine. 1577 **/ 1578 static void 1579 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, 1580 uint32_t idx, 1581 struct rqb_dmabuf *nvmebuf, 1582 uint64_t isr_timestamp) 1583 { 1584 struct lpfc_nvmet_rcv_ctx *ctxp; 1585 struct lpfc_nvmet_tgtport *tgtp; 1586 struct fc_frame_header *fc_hdr; 1587 struct lpfc_nvmet_ctxbuf *ctx_buf; 1588 struct lpfc_nvmet_ctx_info *current_infop; 1589 uint32_t *payload; 1590 uint32_t size, oxid, sid, rc, qno; 1591 unsigned long iflag; 1592 int current_cpu; 1593 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1594 uint32_t id; 1595 #endif 1596 1597 if (!IS_ENABLED(CONFIG_NVME_TARGET_FC)) 1598 return; 1599 1600 ctx_buf = NULL; 1601 if (!nvmebuf || !phba->targetport) { 1602 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1603 "6157 NVMET FCP Drop IO\n"); 1604 oxid = 0; 1605 size = 0; 1606 sid = 0; 1607 ctxp = NULL; 1608 goto dropit; 1609 } 1610 1611 /* 1612 * Get a pointer to the context list for this MRQ based on 1613 * the CPU this MRQ IRQ is associated with. If the CPU association 1614 * changes from our initial assumption, the context list could 1615 * be empty, thus it would need to be replenished with the 1616 * context list from another CPU for this MRQ. 1617 */ 1618 current_cpu = smp_processor_id(); 1619 current_infop = lpfc_get_ctx_list(phba, current_cpu, idx); 1620 spin_lock_irqsave(¤t_infop->nvmet_ctx_list_lock, iflag); 1621 if (current_infop->nvmet_ctx_list_cnt) { 1622 list_remove_head(¤t_infop->nvmet_ctx_list, 1623 ctx_buf, struct lpfc_nvmet_ctxbuf, list); 1624 current_infop->nvmet_ctx_list_cnt--; 1625 } else { 1626 ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop); 1627 } 1628 spin_unlock_irqrestore(¤t_infop->nvmet_ctx_list_lock, iflag); 1629 1630 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); 1631 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 1632 size = nvmebuf->bytes_recv; 1633 1634 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1635 if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) { 1636 id = smp_processor_id(); 1637 if (id < LPFC_CHECK_CPU_CNT) 1638 phba->cpucheck_rcv_io[id]++; 1639 } 1640 #endif 1641 1642 lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n", 1643 oxid, size, smp_processor_id()); 1644 1645 if (!ctx_buf) { 1646 /* Queue this NVME IO to process later */ 1647 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag); 1648 list_add_tail(&nvmebuf->hbuf.list, 1649 &phba->sli4_hba.lpfc_nvmet_io_wait_list); 1650 phba->sli4_hba.nvmet_io_wait_cnt++; 1651 phba->sli4_hba.nvmet_io_wait_total++; 1652 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, 1653 iflag); 1654 1655 /* Post a brand new DMA buffer to RQ */ 1656 qno = nvmebuf->idx; 1657 lpfc_post_rq_buffer( 1658 phba, phba->sli4_hba.nvmet_mrq_hdr[qno], 1659 phba->sli4_hba.nvmet_mrq_data[qno], 1, qno); 1660 return; 1661 } 1662 1663 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 1664 payload = (uint32_t *)(nvmebuf->dbuf.virt); 1665 sid = sli4_sid_from_fc_hdr(fc_hdr); 1666 1667 ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context; 1668 if (ctxp->state != LPFC_NVMET_STE_FREE) { 1669 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1670 "6414 NVMET Context corrupt %d %d oxid x%x\n", 1671 ctxp->state, ctxp->entry_cnt, ctxp->oxid); 1672 } 1673 ctxp->wqeq = NULL; 1674 ctxp->txrdy = NULL; 1675 ctxp->offset = 0; 1676 ctxp->phba = phba; 1677 ctxp->size = size; 1678 ctxp->oxid = oxid; 1679 ctxp->sid = sid; 1680 ctxp->idx = idx; 1681 
ctxp->state = LPFC_NVMET_STE_RCV; 1682 ctxp->entry_cnt = 1; 1683 ctxp->flag = 0; 1684 ctxp->ctxbuf = ctx_buf; 1685 spin_lock_init(&ctxp->ctxlock); 1686 1687 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1688 if (isr_timestamp) { 1689 ctxp->ts_isr_cmd = isr_timestamp; 1690 ctxp->ts_cmd_nvme = ktime_get_ns(); 1691 ctxp->ts_nvme_data = 0; 1692 ctxp->ts_data_wqput = 0; 1693 ctxp->ts_isr_data = 0; 1694 ctxp->ts_data_nvme = 0; 1695 ctxp->ts_nvme_status = 0; 1696 ctxp->ts_status_wqput = 0; 1697 ctxp->ts_isr_status = 0; 1698 ctxp->ts_status_nvme = 0; 1699 } else { 1700 ctxp->ts_cmd_nvme = 0; 1701 } 1702 #endif 1703 1704 atomic_inc(&tgtp->rcv_fcp_cmd_in); 1705 /* 1706 * The calling sequence should be: 1707 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done 1708 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp. 1709 * When we return from nvmet_fc_rcv_fcp_req, all relevant info in 1710 * the NVME command / FC header is stored, so we are free to repost 1711 * the buffer. 1712 */ 1713 rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req, 1714 payload, size); 1715 1716 /* Process FCP command */ 1717 if (rc == 0) { 1718 atomic_inc(&tgtp->rcv_fcp_cmd_out); 1719 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ 1720 return; 1721 } 1722 1723 /* Processing of FCP command is deferred */ 1724 if (rc == -EOVERFLOW) { 1725 lpfc_nvmeio_data(phba, 1726 "NVMET RCV BUSY: xri x%x sz %d from %06x\n", 1727 oxid, size, sid); 1728 /* defer reposting rcv buffer till .defer_rcv callback */ 1729 ctxp->rqb_buffer = nvmebuf; 1730 atomic_inc(&tgtp->rcv_fcp_cmd_out); 1731 return; 1732 } 1733 1734 atomic_inc(&tgtp->rcv_fcp_cmd_drop); 1735 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1736 "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n", 1737 ctxp->oxid, rc, 1738 atomic_read(&tgtp->rcv_fcp_cmd_in), 1739 atomic_read(&tgtp->rcv_fcp_cmd_out), 1740 atomic_read(&tgtp->xmt_fcp_release)); 1741 dropit: 1742 lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n", 1743 oxid, size, sid); 1744 if (oxid) { 1745 lpfc_nvmet_defer_release(phba, ctxp); 1746 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid); 1747 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ 1748 return; 1749 } 1750 1751 if (ctx_buf) 1752 lpfc_nvmet_ctxbuf_post(phba, ctx_buf); 1753 1754 if (nvmebuf) 1755 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ 1756 } 1757 1758 /** 1759 * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport 1760 * @phba: pointer to lpfc hba data structure. 1761 * @pring: pointer to a SLI ring. 1762 * @nvmebuf: pointer to received nvme data structure. 1763 * 1764 * This routine is used to process an unsolicited event received from a SLI 1765 * (Service Level Interface) ring. The actual processing of the data buffer 1766 * associated with the unsolicited event is done by invoking the routine 1767 * lpfc_nvmet_unsol_ls_buffer() after properly set up the buffer from the 1768 * SLI RQ on which the unsolicited event was received. 
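 *
 * Note: on this LS path the receive buffer arrives as an HBQ buffer
 * hanging off piocb->context2; if NVMET support is not enabled it is
 * simply returned with lpfc_in_buf_free().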
1769 **/
1770 void
1771 lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1772 struct lpfc_iocbq *piocb)
1773 {
1774 struct lpfc_dmabuf *d_buf;
1775 struct hbq_dmabuf *nvmebuf;
1776
1777 d_buf = piocb->context2;
1778 nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
1779
1780 if (phba->nvmet_support == 0) {
1781 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1782 return;
1783 }
1784 lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
1785 }
1786
1787 /**
1788 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
1789 * @phba: pointer to lpfc hba data structure.
1790 * @idx: relative index of MRQ vector
1791 * @nvmebuf: pointer to received nvme data structure.
1792 *
1793 * This routine is used to process an unsolicited event received from a SLI
1794 * (Service Level Interface) ring. The actual processing of the data buffer
1795 * associated with the unsolicited event is done by invoking the routine
1796 * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from
1797 * the SLI RQ on which the unsolicited event was received.
1798 **/
1799 void
1800 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
1801 uint32_t idx,
1802 struct rqb_dmabuf *nvmebuf,
1803 uint64_t isr_timestamp)
1804 {
1805 if (phba->nvmet_support == 0) {
1806 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
1807 return;
1808 }
1809 lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf,
1810 isr_timestamp);
1811 }
1812
1813 /**
1814 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
1815 * @phba: pointer to a host N_Port data structure.
1816 * @ctxp: Context info for NVME LS Request
1817 * @rspbuf: DMA address of the NVME LS response buffer.
1818 * @rspsize: size of the NVME LS response.
1819 *
1820 * This routine is used for allocating a lpfc-WQE data structure from
1821 * the driver lpfc-WQE free-list and preparing the WQE with the
1822 * parameters passed into the routine, so that an NVME Link Service
1823 * response can be transmitted as an XMIT_SEQUENCE64 WQE. It is a
1824 * generic preparation routine; the NVME command-specific fields are
1825 * set up later by the caller after this routine returns the prepared
1826 * WQE. The routine fills in the Buffer Descriptor Entry (BDE) with the
1827 * response payload supplied by the caller in @rspbuf and @rspsize; no
1828 * payload buffers are allocated here. The reference count on the ndlp
1829 * is incremented by 1 and the reference to the ndlp is put into
1830 * context1 of the WQE data structure for this WQE to hold the ndlp
1831 * reference for the command's callback function to access later.
1832 *
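 * If a later step fails after the WQE has been allocated, the WQE is
 * released back to the free-list before NULL is returned.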
1833 * 1834 * Return code 1835 * Pointer to the newly allocated/prepared nvme wqe data structure 1836 * NULL - when nvme wqe data structure allocation/preparation failed 1837 **/ 1838 static struct lpfc_iocbq * 1839 lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba, 1840 struct lpfc_nvmet_rcv_ctx *ctxp, 1841 dma_addr_t rspbuf, uint16_t rspsize) 1842 { 1843 struct lpfc_nodelist *ndlp; 1844 struct lpfc_iocbq *nvmewqe; 1845 union lpfc_wqe *wqe; 1846 1847 if (!lpfc_is_link_up(phba)) { 1848 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, 1849 "6104 NVMET prep LS wqe: link err: " 1850 "NPORT x%x oxid:x%x ste %d\n", 1851 ctxp->sid, ctxp->oxid, ctxp->state); 1852 return NULL; 1853 } 1854 1855 /* Allocate buffer for command wqe */ 1856 nvmewqe = lpfc_sli_get_iocbq(phba); 1857 if (nvmewqe == NULL) { 1858 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, 1859 "6105 NVMET prep LS wqe: No WQE: " 1860 "NPORT x%x oxid x%x ste %d\n", 1861 ctxp->sid, ctxp->oxid, ctxp->state); 1862 return NULL; 1863 } 1864 1865 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); 1866 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || 1867 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 1868 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { 1869 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, 1870 "6106 NVMET prep LS wqe: No ndlp: " 1871 "NPORT x%x oxid x%x ste %d\n", 1872 ctxp->sid, ctxp->oxid, ctxp->state); 1873 goto nvme_wqe_free_wqeq_exit; 1874 } 1875 ctxp->wqeq = nvmewqe; 1876 1877 /* prevent preparing wqe with NULL ndlp reference */ 1878 nvmewqe->context1 = lpfc_nlp_get(ndlp); 1879 if (nvmewqe->context1 == NULL) 1880 goto nvme_wqe_free_wqeq_exit; 1881 nvmewqe->context2 = ctxp; 1882 1883 wqe = &nvmewqe->wqe; 1884 memset(wqe, 0, sizeof(union lpfc_wqe)); 1885 1886 /* Words 0 - 2 */ 1887 wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1888 wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize; 1889 wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf)); 1890 wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf)); 1891 1892 /* Word 3 */ 1893 1894 /* Word 4 */ 1895 1896 /* Word 5 */ 1897 bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0); 1898 bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1); 1899 bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0); 1900 bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP); 1901 bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME); 1902 1903 /* Word 6 */ 1904 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, 1905 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 1906 bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag); 1907 1908 /* Word 7 */ 1909 bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com, 1910 CMD_XMIT_SEQUENCE64_WQE); 1911 bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI); 1912 bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3); 1913 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); 1914 1915 /* Word 8 */ 1916 wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag; 1917 1918 /* Word 9 */ 1919 bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag); 1920 /* Needs to be set by caller */ 1921 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid); 1922 1923 /* Word 10 */ 1924 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); 1925 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE); 1926 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, 1927 LPFC_WQE_LENLOC_WORD12); 1928 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); 1929 1930 /* Word 11 */ 1931 bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com, 1932 LPFC_WQE_CQ_ID_DEFAULT); 1933 
bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com, 1934 OTHER_COMMAND); 1935 1936 /* Word 12 */ 1937 wqe->xmit_sequence.xmit_len = rspsize; 1938 1939 nvmewqe->retry = 1; 1940 nvmewqe->vport = phba->pport; 1941 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT; 1942 nvmewqe->iocb_flag |= LPFC_IO_NVME_LS; 1943 1944 /* Xmit NVMET response to remote NPORT <did> */ 1945 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, 1946 "6039 Xmit NVMET LS response to remote " 1947 "NPORT x%x iotag:x%x oxid:x%x size:x%x\n", 1948 ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid, 1949 rspsize); 1950 return nvmewqe; 1951 1952 nvme_wqe_free_wqeq_exit: 1953 nvmewqe->context2 = NULL; 1954 nvmewqe->context3 = NULL; 1955 lpfc_sli_release_iocbq(phba, nvmewqe); 1956 return NULL; 1957 } 1958 1959 1960 static struct lpfc_iocbq * 1961 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, 1962 struct lpfc_nvmet_rcv_ctx *ctxp) 1963 { 1964 struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req; 1965 struct lpfc_nvmet_tgtport *tgtp; 1966 struct sli4_sge *sgl; 1967 struct lpfc_nodelist *ndlp; 1968 struct lpfc_iocbq *nvmewqe; 1969 struct scatterlist *sgel; 1970 union lpfc_wqe128 *wqe; 1971 uint32_t *txrdy; 1972 dma_addr_t physaddr; 1973 int i, cnt; 1974 int xc = 1; 1975 1976 if (!lpfc_is_link_up(phba)) { 1977 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1978 "6107 NVMET prep FCP wqe: link err:" 1979 "NPORT x%x oxid x%x ste %d\n", 1980 ctxp->sid, ctxp->oxid, ctxp->state); 1981 return NULL; 1982 } 1983 1984 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); 1985 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || 1986 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 1987 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { 1988 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1989 "6108 NVMET prep FCP wqe: no ndlp: " 1990 "NPORT x%x oxid x%x ste %d\n", 1991 ctxp->sid, ctxp->oxid, ctxp->state); 1992 return NULL; 1993 } 1994 1995 if (rsp->sg_cnt > phba->cfg_nvme_seg_cnt) { 1996 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1997 "6109 NVMET prep FCP wqe: seg cnt err: " 1998 "NPORT x%x oxid x%x ste %d cnt %d\n", 1999 ctxp->sid, ctxp->oxid, ctxp->state, 2000 phba->cfg_nvme_seg_cnt); 2001 return NULL; 2002 } 2003 2004 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 2005 nvmewqe = ctxp->wqeq; 2006 if (nvmewqe == NULL) { 2007 /* Allocate buffer for command wqe */ 2008 nvmewqe = ctxp->ctxbuf->iocbq; 2009 if (nvmewqe == NULL) { 2010 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 2011 "6110 NVMET prep FCP wqe: No " 2012 "WQE: NPORT x%x oxid x%x ste %d\n", 2013 ctxp->sid, ctxp->oxid, ctxp->state); 2014 return NULL; 2015 } 2016 ctxp->wqeq = nvmewqe; 2017 xc = 0; /* create new XRI */ 2018 nvmewqe->sli4_lxritag = NO_XRI; 2019 nvmewqe->sli4_xritag = NO_XRI; 2020 } 2021 2022 /* Sanity check */ 2023 if (((ctxp->state == LPFC_NVMET_STE_RCV) && 2024 (ctxp->entry_cnt == 1)) || 2025 (ctxp->state == LPFC_NVMET_STE_DATA)) { 2026 wqe = (union lpfc_wqe128 *)&nvmewqe->wqe; 2027 } else { 2028 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 2029 "6111 Wrong state NVMET FCP: %d cnt %d\n", 2030 ctxp->state, ctxp->entry_cnt); 2031 return NULL; 2032 } 2033 2034 sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl; 2035 switch (rsp->op) { 2036 case NVMET_FCOP_READDATA: 2037 case NVMET_FCOP_READDATA_RSP: 2038 /* Words 0 - 2 : The first sg segment */ 2039 sgel = &rsp->sg[0]; 2040 physaddr = sg_dma_address(sgel); 2041 wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; 2042 wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel); 2043 wqe->fcp_tsend.bde.addrLow = 
cpu_to_le32(putPaddrLow(physaddr)); 2044 wqe->fcp_tsend.bde.addrHigh = 2045 cpu_to_le32(putPaddrHigh(physaddr)); 2046 2047 /* Word 3 */ 2048 wqe->fcp_tsend.payload_offset_len = 0; 2049 2050 /* Word 4 */ 2051 wqe->fcp_tsend.relative_offset = ctxp->offset; 2052 2053 /* Word 5 */ 2054 2055 /* Word 6 */ 2056 bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com, 2057 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 2058 bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com, 2059 nvmewqe->sli4_xritag); 2060 2061 /* Word 7 */ 2062 bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, 1); 2063 bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE); 2064 2065 /* Word 8 */ 2066 wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag; 2067 2068 /* Word 9 */ 2069 bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag); 2070 bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid); 2071 2072 /* Word 10 */ 2073 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1); 2074 bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1); 2075 bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE); 2076 bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, 2077 LPFC_WQE_LENLOC_WORD12); 2078 bf_set(wqe_ebde_cnt, &wqe->fcp_tsend.wqe_com, 0); 2079 bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, xc); 2080 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1); 2081 if (phba->cfg_nvme_oas) 2082 bf_set(wqe_oas, &wqe->fcp_tsend.wqe_com, 1); 2083 2084 /* Word 11 */ 2085 bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, 2086 LPFC_WQE_CQ_ID_DEFAULT); 2087 bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, 2088 FCP_COMMAND_TSEND); 2089 2090 /* Word 12 */ 2091 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length; 2092 2093 /* Setup 2 SKIP SGEs */ 2094 sgl->addr_hi = 0; 2095 sgl->addr_lo = 0; 2096 sgl->word2 = 0; 2097 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); 2098 sgl->word2 = cpu_to_le32(sgl->word2); 2099 sgl->sge_len = 0; 2100 sgl++; 2101 sgl->addr_hi = 0; 2102 sgl->addr_lo = 0; 2103 sgl->word2 = 0; 2104 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); 2105 sgl->word2 = cpu_to_le32(sgl->word2); 2106 sgl->sge_len = 0; 2107 sgl++; 2108 if (rsp->op == NVMET_FCOP_READDATA_RSP) { 2109 atomic_inc(&tgtp->xmt_fcp_read_rsp); 2110 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1); 2111 if ((ndlp->nlp_flag & NLP_SUPPRESS_RSP) && 2112 (rsp->rsplen == 12)) { 2113 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 1); 2114 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0); 2115 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0); 2116 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0); 2117 } else { 2118 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0); 2119 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1); 2120 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1); 2121 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 2122 ((rsp->rsplen >> 2) - 1)); 2123 memcpy(&wqe->words[16], rsp->rspaddr, 2124 rsp->rsplen); 2125 } 2126 } else { 2127 atomic_inc(&tgtp->xmt_fcp_read); 2128 2129 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0); 2130 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0); 2131 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0); 2132 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0); 2133 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0); 2134 } 2135 break; 2136 2137 case NVMET_FCOP_WRITEDATA: 2138 /* Words 0 - 2 : The first sg segment */ 2139 txrdy = dma_pool_alloc(phba->txrdy_payload_pool, 2140 GFP_KERNEL, &physaddr); 2141 if (!txrdy) { 2142 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 2143 "6041 Bad txrdy buffer: oxid x%x\n", 2144 ctxp->oxid); 2145 return NULL; 2146 } 2147 ctxp->txrdy = txrdy; 2148 ctxp->txrdy_phys = physaddr; 2149 wqe->fcp_treceive.bde.tus.f.bdeFlags = 
BUFF_TYPE_BDE_64; 2150 wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN; 2151 wqe->fcp_treceive.bde.addrLow = 2152 cpu_to_le32(putPaddrLow(physaddr)); 2153 wqe->fcp_treceive.bde.addrHigh = 2154 cpu_to_le32(putPaddrHigh(physaddr)); 2155 2156 /* Word 3 */ 2157 wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN; 2158 2159 /* Word 4 */ 2160 wqe->fcp_treceive.relative_offset = ctxp->offset; 2161 2162 /* Word 5 */ 2163 2164 /* Word 6 */ 2165 bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com, 2166 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 2167 bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com, 2168 nvmewqe->sli4_xritag); 2169 2170 /* Word 7 */ 2171 bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, 1); 2172 bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0); 2173 bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, 2174 CMD_FCP_TRECEIVE64_WQE); 2175 2176 /* Word 8 */ 2177 wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag; 2178 2179 /* Word 9 */ 2180 bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag); 2181 bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid); 2182 2183 /* Word 10 */ 2184 bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1); 2185 bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1); 2186 bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ); 2187 bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, 2188 LPFC_WQE_LENLOC_WORD12); 2189 bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, xc); 2190 bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0); 2191 bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0); 2192 bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0); 2193 bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1); 2194 if (phba->cfg_nvme_oas) 2195 bf_set(wqe_oas, &wqe->fcp_treceive.wqe_com, 1); 2196 2197 /* Word 11 */ 2198 bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, 2199 LPFC_WQE_CQ_ID_DEFAULT); 2200 bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, 2201 FCP_COMMAND_TRECEIVE); 2202 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0); 2203 2204 /* Word 12 */ 2205 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length; 2206 2207 /* Setup 1 TXRDY and 1 SKIP SGE */ 2208 txrdy[0] = 0; 2209 txrdy[1] = cpu_to_be32(rsp->transfer_length); 2210 txrdy[2] = 0; 2211 2212 sgl->addr_hi = putPaddrHigh(physaddr); 2213 sgl->addr_lo = putPaddrLow(physaddr); 2214 sgl->word2 = 0; 2215 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); 2216 sgl->word2 = cpu_to_le32(sgl->word2); 2217 sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN); 2218 sgl++; 2219 sgl->addr_hi = 0; 2220 sgl->addr_lo = 0; 2221 sgl->word2 = 0; 2222 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); 2223 sgl->word2 = cpu_to_le32(sgl->word2); 2224 sgl->sge_len = 0; 2225 sgl++; 2226 atomic_inc(&tgtp->xmt_fcp_write); 2227 break; 2228 2229 case NVMET_FCOP_RSP: 2230 /* Words 0 - 2 */ 2231 physaddr = rsp->rspdma; 2232 wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; 2233 wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen; 2234 wqe->fcp_trsp.bde.addrLow = 2235 cpu_to_le32(putPaddrLow(physaddr)); 2236 wqe->fcp_trsp.bde.addrHigh = 2237 cpu_to_le32(putPaddrHigh(physaddr)); 2238 2239 /* Word 3 */ 2240 wqe->fcp_trsp.response_len = rsp->rsplen; 2241 2242 /* Word 4 */ 2243 wqe->fcp_trsp.rsvd_4_5[0] = 0; 2244 2245 2246 /* Word 5 */ 2247 2248 /* Word 6 */ 2249 bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com, 2250 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 2251 bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com, 2252 nvmewqe->sli4_xritag); 2253 2254 /* Word 7 */ 2255 bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, 0); 2256 bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); 2257 bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, 
CMD_FCP_TRSP64_WQE); 2258 2259 /* Word 8 */ 2260 wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag; 2261 2262 /* Word 9 */ 2263 bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag); 2264 bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid); 2265 2266 /* Word 10 */ 2267 bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1); 2268 bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 0); 2269 bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_WRITE); 2270 bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, 2271 LPFC_WQE_LENLOC_WORD3); 2272 bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, xc); 2273 bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1); 2274 if (phba->cfg_nvme_oas) 2275 bf_set(wqe_oas, &wqe->fcp_trsp.wqe_com, 1); 2276 2277 /* Word 11 */ 2278 bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, 2279 LPFC_WQE_CQ_ID_DEFAULT); 2280 bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, 2281 FCP_COMMAND_TRSP); 2282 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0); 2283 2284 if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) { 2285 /* Good response - all zero's on wire */ 2286 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0); 2287 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0); 2288 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0); 2289 } else { 2290 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1); 2291 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1); 2292 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 2293 ((rsp->rsplen >> 2) - 1)); 2294 memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen); 2295 } 2296 2297 /* Use rspbuf, NOT sg list */ 2298 rsp->sg_cnt = 0; 2299 sgl->word2 = 0; 2300 atomic_inc(&tgtp->xmt_fcp_rsp); 2301 break; 2302 2303 default: 2304 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, 2305 "6064 Unknown Rsp Op %d\n", 2306 rsp->op); 2307 return NULL; 2308 } 2309 2310 nvmewqe->retry = 1; 2311 nvmewqe->vport = phba->pport; 2312 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT; 2313 nvmewqe->context1 = ndlp; 2314 2315 for (i = 0; i < rsp->sg_cnt; i++) { 2316 sgel = &rsp->sg[i]; 2317 physaddr = sg_dma_address(sgel); 2318 cnt = sg_dma_len(sgel); 2319 sgl->addr_hi = putPaddrHigh(physaddr); 2320 sgl->addr_lo = putPaddrLow(physaddr); 2321 sgl->word2 = 0; 2322 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); 2323 bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset); 2324 if ((i+1) == rsp->sg_cnt) 2325 bf_set(lpfc_sli4_sge_last, sgl, 1); 2326 sgl->word2 = cpu_to_le32(sgl->word2); 2327 sgl->sge_len = cpu_to_le32(cnt); 2328 sgl++; 2329 ctxp->offset += cnt; 2330 } 2331 ctxp->state = LPFC_NVMET_STE_DATA; 2332 ctxp->entry_cnt++; 2333 return nvmewqe; 2334 } 2335 2336 /** 2337 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS 2338 * @phba: Pointer to HBA context object. 2339 * @cmdwqe: Pointer to driver command WQE object. 2340 * @wcqe: Pointer to driver response CQE object. 2341 * 2342 * The function is called from SLI ring event handler with no 2343 * lock held. This function is the completion handler for NVME ABTS for FCP cmds 2344 * The function frees memory resources used for the NVME commands. 
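 * The abort WQE itself is always released here; the aborted command's
 * context buffer is posted back only if the transport has already
 * released the context.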
2345 **/ 2346 static void 2347 lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, 2348 struct lpfc_wcqe_complete *wcqe) 2349 { 2350 struct lpfc_nvmet_rcv_ctx *ctxp; 2351 struct lpfc_nvmet_tgtport *tgtp; 2352 uint32_t status, result; 2353 unsigned long flags; 2354 bool released = false; 2355 2356 ctxp = cmdwqe->context2; 2357 status = bf_get(lpfc_wcqe_c_status, wcqe); 2358 result = wcqe->parameter; 2359 2360 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 2361 if (ctxp->flag & LPFC_NVMET_ABORT_OP) 2362 atomic_inc(&tgtp->xmt_fcp_abort_cmpl); 2363 2364 ctxp->state = LPFC_NVMET_STE_DONE; 2365 2366 /* Check if we already received a free context call 2367 * and we have completed processing an abort situation. 2368 */ 2369 spin_lock_irqsave(&ctxp->ctxlock, flags); 2370 if ((ctxp->flag & LPFC_NVMET_CTX_RLS) && 2371 !(ctxp->flag & LPFC_NVMET_XBUSY)) { 2372 list_del(&ctxp->list); 2373 released = true; 2374 } 2375 ctxp->flag &= ~LPFC_NVMET_ABORT_OP; 2376 spin_unlock_irqrestore(&ctxp->ctxlock, flags); 2377 atomic_inc(&tgtp->xmt_abort_rsp); 2378 2379 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2380 "6165 ABORT cmpl: xri x%x flg x%x (%d) " 2381 "WCQE: %08x %08x %08x %08x\n", 2382 ctxp->oxid, ctxp->flag, released, 2383 wcqe->word0, wcqe->total_data_placed, 2384 result, wcqe->word3); 2385 2386 cmdwqe->context2 = NULL; 2387 cmdwqe->context3 = NULL; 2388 /* 2389 * if transport has released ctx, then can reuse it. Otherwise, 2390 * will be recycled by transport release call. 2391 */ 2392 if (released) 2393 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); 2394 2395 /* This is the iocbq for the abort, not the command */ 2396 lpfc_sli_release_iocbq(phba, cmdwqe); 2397 2398 /* Since iaab/iaar are NOT set, there is no work left. 2399 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted 2400 * should have been called already. 2401 */ 2402 } 2403 2404 /** 2405 * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS 2406 * @phba: Pointer to HBA context object. 2407 * @cmdwqe: Pointer to driver command WQE object. 2408 * @wcqe: Pointer to driver response CQE object. 2409 * 2410 * The function is called from SLI ring event handler with no 2411 * lock held. This function is the completion handler for NVME ABTS for FCP cmds 2412 * The function frees memory resources used for the NVME commands. 2413 **/ 2414 static void 2415 lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, 2416 struct lpfc_wcqe_complete *wcqe) 2417 { 2418 struct lpfc_nvmet_rcv_ctx *ctxp; 2419 struct lpfc_nvmet_tgtport *tgtp; 2420 unsigned long flags; 2421 uint32_t status, result; 2422 bool released = false; 2423 2424 ctxp = cmdwqe->context2; 2425 status = bf_get(lpfc_wcqe_c_status, wcqe); 2426 result = wcqe->parameter; 2427 2428 if (!ctxp) { 2429 /* if context is clear, related io alrady complete */ 2430 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2431 "6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n", 2432 wcqe->word0, wcqe->total_data_placed, 2433 result, wcqe->word3); 2434 return; 2435 } 2436 2437 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 2438 if (ctxp->flag & LPFC_NVMET_ABORT_OP) 2439 atomic_inc(&tgtp->xmt_fcp_abort_cmpl); 2440 2441 /* Sanity check */ 2442 if (ctxp->state != LPFC_NVMET_STE_ABORT) { 2443 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 2444 "6112 ABTS Wrong state:%d oxid x%x\n", 2445 ctxp->state, ctxp->oxid); 2446 } 2447 2448 /* Check if we already received a free context call 2449 * and we have completed processing an abort situation. 
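 * If both are true, the context is removed from the driver's abort
 * context list here and posted back to the free pool once the lock
 * is dropped.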
2450 */ 2451 ctxp->state = LPFC_NVMET_STE_DONE; 2452 spin_lock_irqsave(&ctxp->ctxlock, flags); 2453 if ((ctxp->flag & LPFC_NVMET_CTX_RLS) && 2454 !(ctxp->flag & LPFC_NVMET_XBUSY)) { 2455 list_del(&ctxp->list); 2456 released = true; 2457 } 2458 ctxp->flag &= ~LPFC_NVMET_ABORT_OP; 2459 spin_unlock_irqrestore(&ctxp->ctxlock, flags); 2460 atomic_inc(&tgtp->xmt_abort_rsp); 2461 2462 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2463 "6316 ABTS cmpl xri x%x flg x%x (%x) " 2464 "WCQE: %08x %08x %08x %08x\n", 2465 ctxp->oxid, ctxp->flag, released, 2466 wcqe->word0, wcqe->total_data_placed, 2467 result, wcqe->word3); 2468 2469 cmdwqe->context2 = NULL; 2470 cmdwqe->context3 = NULL; 2471 /* 2472 * if transport has released ctx, then can reuse it. Otherwise, 2473 * will be recycled by transport release call. 2474 */ 2475 if (released) 2476 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); 2477 2478 /* Since iaab/iaar are NOT set, there is no work left. 2479 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted 2480 * should have been called already. 2481 */ 2482 } 2483 2484 /** 2485 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS 2486 * @phba: Pointer to HBA context object. 2487 * @cmdwqe: Pointer to driver command WQE object. 2488 * @wcqe: Pointer to driver response CQE object. 2489 * 2490 * The function is called from SLI ring event handler with no 2491 * lock held. This function is the completion handler for NVME ABTS for LS cmds 2492 * The function frees memory resources used for the NVME commands. 2493 **/ 2494 static void 2495 lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, 2496 struct lpfc_wcqe_complete *wcqe) 2497 { 2498 struct lpfc_nvmet_rcv_ctx *ctxp; 2499 struct lpfc_nvmet_tgtport *tgtp; 2500 uint32_t status, result; 2501 2502 ctxp = cmdwqe->context2; 2503 status = bf_get(lpfc_wcqe_c_status, wcqe); 2504 result = wcqe->parameter; 2505 2506 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 2507 atomic_inc(&tgtp->xmt_ls_abort_cmpl); 2508 2509 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2510 "6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n", 2511 ctxp, wcqe->word0, wcqe->total_data_placed, 2512 result, wcqe->word3); 2513 2514 if (!ctxp) { 2515 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 2516 "6415 NVMET LS Abort No ctx: WCQE: " 2517 "%08x %08x %08x %08x\n", 2518 wcqe->word0, wcqe->total_data_placed, 2519 result, wcqe->word3); 2520 2521 lpfc_sli_release_iocbq(phba, cmdwqe); 2522 return; 2523 } 2524 2525 if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) { 2526 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 2527 "6416 NVMET LS abort cmpl state mismatch: " 2528 "oxid x%x: %d %d\n", 2529 ctxp->oxid, ctxp->state, ctxp->entry_cnt); 2530 } 2531 2532 cmdwqe->context2 = NULL; 2533 cmdwqe->context3 = NULL; 2534 lpfc_sli_release_iocbq(phba, cmdwqe); 2535 kfree(ctxp); 2536 } 2537 2538 static int 2539 lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba, 2540 struct lpfc_nvmet_rcv_ctx *ctxp, 2541 uint32_t sid, uint16_t xri) 2542 { 2543 struct lpfc_nvmet_tgtport *tgtp; 2544 struct lpfc_iocbq *abts_wqeq; 2545 union lpfc_wqe *wqe_abts; 2546 struct lpfc_nodelist *ndlp; 2547 2548 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2549 "6067 ABTS: sid %x xri x%x/x%x\n", 2550 sid, xri, ctxp->wqeq->sli4_xritag); 2551 2552 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 2553 2554 ndlp = lpfc_findnode_did(phba->pport, sid); 2555 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || 2556 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 2557 (ndlp->nlp_state != 
NLP_STE_MAPPED_NODE))) { 2558 atomic_inc(&tgtp->xmt_abort_rsp_error); 2559 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 2560 "6134 Drop ABTS - wrong NDLP state x%x.\n", 2561 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE); 2562 2563 /* No failure to an ABTS request. */ 2564 return 0; 2565 } 2566 2567 abts_wqeq = ctxp->wqeq; 2568 wqe_abts = &abts_wqeq->wqe; 2569 2570 /* 2571 * Since we zero the whole WQE, we need to ensure we set the WQE fields 2572 * that were initialized in lpfc_sli4_nvmet_alloc. 2573 */ 2574 memset(wqe_abts, 0, sizeof(union lpfc_wqe)); 2575 2576 /* Word 5 */ 2577 bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0); 2578 bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1); 2579 bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0); 2580 bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS); 2581 bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS); 2582 2583 /* Word 6 */ 2584 bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com, 2585 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 2586 bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com, 2587 abts_wqeq->sli4_xritag); 2588 2589 /* Word 7 */ 2590 bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com, 2591 CMD_XMIT_SEQUENCE64_WQE); 2592 bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI); 2593 bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3); 2594 bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0); 2595 2596 /* Word 8 */ 2597 wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag; 2598 2599 /* Word 9 */ 2600 bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag); 2601 /* Needs to be set by caller */ 2602 bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri); 2603 2604 /* Word 10 */ 2605 bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1); 2606 bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE); 2607 bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com, 2608 LPFC_WQE_LENLOC_WORD12); 2609 bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0); 2610 bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0); 2611 2612 /* Word 11 */ 2613 bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com, 2614 LPFC_WQE_CQ_ID_DEFAULT); 2615 bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com, 2616 OTHER_COMMAND); 2617 2618 abts_wqeq->vport = phba->pport; 2619 abts_wqeq->context1 = ndlp; 2620 abts_wqeq->context2 = ctxp; 2621 abts_wqeq->context3 = NULL; 2622 abts_wqeq->rsvd2 = 0; 2623 /* hba_wqidx should already be setup from command we are aborting */ 2624 abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR; 2625 abts_wqeq->iocb.ulpLe = 1; 2626 2627 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2628 "6069 Issue ABTS to xri x%x reqtag x%x\n", 2629 xri, abts_wqeq->iotag); 2630 return 1; 2631 } 2632 2633 static int 2634 lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, 2635 struct lpfc_nvmet_rcv_ctx *ctxp, 2636 uint32_t sid, uint16_t xri) 2637 { 2638 struct lpfc_nvmet_tgtport *tgtp; 2639 struct lpfc_iocbq *abts_wqeq; 2640 union lpfc_wqe *abts_wqe; 2641 struct lpfc_nodelist *ndlp; 2642 unsigned long flags; 2643 int rc; 2644 2645 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 2646 if (!ctxp->wqeq) { 2647 ctxp->wqeq = ctxp->ctxbuf->iocbq; 2648 ctxp->wqeq->hba_wqidx = 0; 2649 } 2650 2651 ndlp = lpfc_findnode_did(phba->pport, sid); 2652 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || 2653 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 2654 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { 2655 atomic_inc(&tgtp->xmt_abort_rsp_error); 2656 lpfc_printf_log(phba, 
KERN_ERR, LOG_NVME_ABTS, 2657 "6160 Drop ABORT - wrong NDLP state x%x.\n", 2658 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE); 2659 2660 /* No failure to an ABTS request. */ 2661 ctxp->flag &= ~LPFC_NVMET_ABORT_OP; 2662 return 0; 2663 } 2664 2665 /* Issue ABTS for this WQE based on iotag */ 2666 ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba); 2667 if (!ctxp->abort_wqeq) { 2668 atomic_inc(&tgtp->xmt_abort_rsp_error); 2669 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 2670 "6161 ABORT failed: No wqeqs: " 2671 "xri: x%x\n", ctxp->oxid); 2672 /* No failure to an ABTS request. */ 2673 ctxp->flag &= ~LPFC_NVMET_ABORT_OP; 2674 return 0; 2675 } 2676 abts_wqeq = ctxp->abort_wqeq; 2677 abts_wqe = &abts_wqeq->wqe; 2678 ctxp->state = LPFC_NVMET_STE_ABORT; 2679 2680 /* Announce entry to new IO submit field. */ 2681 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2682 "6162 ABORT Request to rport DID x%06x " 2683 "for xri x%x x%x\n", 2684 ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag); 2685 2686 /* If the hba is getting reset, this flag is set. It is 2687 * cleared when the reset is complete and rings reestablished. 2688 */ 2689 spin_lock_irqsave(&phba->hbalock, flags); 2690 /* driver queued commands are in process of being flushed */ 2691 if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) { 2692 spin_unlock_irqrestore(&phba->hbalock, flags); 2693 atomic_inc(&tgtp->xmt_abort_rsp_error); 2694 lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 2695 "6163 Driver in reset cleanup - flushing " 2696 "NVME Req now. hba_flag x%x oxid x%x\n", 2697 phba->hba_flag, ctxp->oxid); 2698 lpfc_sli_release_iocbq(phba, abts_wqeq); 2699 ctxp->flag &= ~LPFC_NVMET_ABORT_OP; 2700 return 0; 2701 } 2702 2703 /* Outstanding abort is in progress */ 2704 if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) { 2705 spin_unlock_irqrestore(&phba->hbalock, flags); 2706 atomic_inc(&tgtp->xmt_abort_rsp_error); 2707 lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 2708 "6164 Outstanding NVME I/O Abort Request " 2709 "still pending on oxid x%x\n", 2710 ctxp->oxid); 2711 lpfc_sli_release_iocbq(phba, abts_wqeq); 2712 ctxp->flag &= ~LPFC_NVMET_ABORT_OP; 2713 return 0; 2714 } 2715 2716 /* Ready - mark outstanding as aborted by driver. */ 2717 abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED; 2718 2719 /* WQEs are reused. Clear stale data and set key fields to 2720 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag. 2721 */ 2722 memset(abts_wqe, 0, sizeof(union lpfc_wqe)); 2723 2724 /* word 3 */ 2725 bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG); 2726 2727 /* word 7 */ 2728 bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0); 2729 bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); 2730 2731 /* word 8 - tell the FW to abort the IO associated with this 2732 * outstanding exchange ID. 2733 */ 2734 abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag; 2735 2736 /* word 9 - this is the iotag for the abts_wqe completion. 
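 * The request tag lets the driver match the abort completion back to
 * abts_wqeq when lpfc_nvmet_sol_fcp_abort_cmp() runs.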
*/ 2737 bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com, 2738 abts_wqeq->iotag); 2739 2740 /* word 10 */ 2741 bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1); 2742 bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); 2743 2744 /* word 11 */ 2745 bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND); 2746 bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1); 2747 bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 2748 2749 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 2750 abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx; 2751 abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp; 2752 abts_wqeq->iocb_cmpl = 0; 2753 abts_wqeq->iocb_flag |= LPFC_IO_NVME; 2754 abts_wqeq->context2 = ctxp; 2755 abts_wqeq->vport = phba->pport; 2756 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq); 2757 spin_unlock_irqrestore(&phba->hbalock, flags); 2758 if (rc == WQE_SUCCESS) { 2759 atomic_inc(&tgtp->xmt_abort_sol); 2760 return 0; 2761 } 2762 2763 atomic_inc(&tgtp->xmt_abort_rsp_error); 2764 ctxp->flag &= ~LPFC_NVMET_ABORT_OP; 2765 lpfc_sli_release_iocbq(phba, abts_wqeq); 2766 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 2767 "6166 Failed ABORT issue_wqe with status x%x " 2768 "for oxid x%x.\n", 2769 rc, ctxp->oxid); 2770 return 1; 2771 } 2772 2773 2774 static int 2775 lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, 2776 struct lpfc_nvmet_rcv_ctx *ctxp, 2777 uint32_t sid, uint16_t xri) 2778 { 2779 struct lpfc_nvmet_tgtport *tgtp; 2780 struct lpfc_iocbq *abts_wqeq; 2781 unsigned long flags; 2782 int rc; 2783 2784 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 2785 if (!ctxp->wqeq) { 2786 ctxp->wqeq = ctxp->ctxbuf->iocbq; 2787 ctxp->wqeq->hba_wqidx = 0; 2788 } 2789 2790 if (ctxp->state == LPFC_NVMET_STE_FREE) { 2791 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 2792 "6417 NVMET ABORT ctx freed %d %d oxid x%x\n", 2793 ctxp->state, ctxp->entry_cnt, ctxp->oxid); 2794 rc = WQE_BUSY; 2795 goto aerr; 2796 } 2797 ctxp->state = LPFC_NVMET_STE_ABORT; 2798 ctxp->entry_cnt++; 2799 rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri); 2800 if (rc == 0) 2801 goto aerr; 2802 2803 spin_lock_irqsave(&phba->hbalock, flags); 2804 abts_wqeq = ctxp->wqeq; 2805 abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp; 2806 abts_wqeq->iocb_cmpl = NULL; 2807 abts_wqeq->iocb_flag |= LPFC_IO_NVMET; 2808 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq); 2809 spin_unlock_irqrestore(&phba->hbalock, flags); 2810 if (rc == WQE_SUCCESS) { 2811 return 0; 2812 } 2813 2814 aerr: 2815 ctxp->flag &= ~LPFC_NVMET_ABORT_OP; 2816 atomic_inc(&tgtp->xmt_abort_rsp_error); 2817 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 2818 "6135 Failed to Issue ABTS for oxid x%x. 
Status x%x\n", 2819 ctxp->oxid, rc); 2820 return 1; 2821 } 2822 2823 static int 2824 lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba, 2825 struct lpfc_nvmet_rcv_ctx *ctxp, 2826 uint32_t sid, uint16_t xri) 2827 { 2828 struct lpfc_nvmet_tgtport *tgtp; 2829 struct lpfc_iocbq *abts_wqeq; 2830 union lpfc_wqe *wqe_abts; 2831 unsigned long flags; 2832 int rc; 2833 2834 if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) || 2835 (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) { 2836 ctxp->state = LPFC_NVMET_STE_LS_ABORT; 2837 ctxp->entry_cnt++; 2838 } else { 2839 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 2840 "6418 NVMET LS abort state mismatch " 2841 "IO x%x: %d %d\n", 2842 ctxp->oxid, ctxp->state, ctxp->entry_cnt); 2843 ctxp->state = LPFC_NVMET_STE_LS_ABORT; 2844 } 2845 2846 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 2847 if (!ctxp->wqeq) { 2848 /* Issue ABTS for this WQE based on iotag */ 2849 ctxp->wqeq = lpfc_sli_get_iocbq(phba); 2850 if (!ctxp->wqeq) { 2851 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 2852 "6068 Abort failed: No wqeqs: " 2853 "xri: x%x\n", xri); 2854 /* No failure to an ABTS request. */ 2855 kfree(ctxp); 2856 return 0; 2857 } 2858 } 2859 abts_wqeq = ctxp->wqeq; 2860 wqe_abts = &abts_wqeq->wqe; 2861 2862 if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) { 2863 rc = WQE_BUSY; 2864 goto out; 2865 } 2866 2867 spin_lock_irqsave(&phba->hbalock, flags); 2868 abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp; 2869 abts_wqeq->iocb_cmpl = 0; 2870 abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS; 2871 rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq); 2872 spin_unlock_irqrestore(&phba->hbalock, flags); 2873 if (rc == WQE_SUCCESS) { 2874 atomic_inc(&tgtp->xmt_abort_unsol); 2875 return 0; 2876 } 2877 out: 2878 atomic_inc(&tgtp->xmt_abort_rsp_error); 2879 abts_wqeq->context2 = NULL; 2880 abts_wqeq->context3 = NULL; 2881 lpfc_sli_release_iocbq(phba, abts_wqeq); 2882 kfree(ctxp); 2883 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 2884 "6056 Failed to Issue ABTS. Status x%x\n", rc); 2885 return 0; 2886 } 2887
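/*
 * Abort path summary: lpfc_nvmet_sol_fcp_issue_abort() issues an
 * ABORT_XRI_CX WQE to abort an outstanding FCP WQE by XRI and completes
 * in lpfc_nvmet_sol_fcp_abort_cmp(). lpfc_nvmet_unsol_fcp_issue_abort()
 * and lpfc_nvmet_unsol_ls_issue_abort() instead send a BLS ABTS built by
 * lpfc_nvmet_unsol_issue_abort() (XMIT_SEQUENCE64 of FC_RCTL_BA_ABTS)
 * and complete in lpfc_nvmet_unsol_fcp_abort_cmp() and
 * lpfc_nvmet_xmt_ls_abort_cmp() respectively.
 */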