/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
 * "Broadcom" refers to Broadcom Limited and/or its subsidiaries.  *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <../drivers/nvme/host/nvme.h>
#include <linux/nvme-fc-driver.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
						 struct lpfc_nvmet_rcv_ctx *,
						 dma_addr_t rspbuf,
						 uint16_t rspsize);
static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
						  struct lpfc_nvmet_rcv_ctx *);
static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
					  struct lpfc_nvmet_rcv_ctx *,
					  uint32_t, uint16_t);
static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
					    struct lpfc_nvmet_rcv_ctx *,
					    uint32_t, uint16_t);
static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
					   struct lpfc_nvmet_rcv_ctx *,
					   uint32_t, uint16_t);

void
lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
{
	unsigned long iflag;

	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6313 NVMET Defer ctx release xri x%x flg x%x\n",
			ctxp->oxid, ctxp->flag);

	spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
	if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
				       iflag);
		return;
	}
	ctxp->flag |= LPFC_NVMET_CTX_RLS;
	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
	spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
}

/**
 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. This function is the completion handler for NVME LS commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_ls_req *rsp;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t status, result;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;
	ctxp = cmdwqe->context2;

	if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6410 NVMET LS cmpl state mismatch IO x%x: "
				"%d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	if (!phba->targetport)
		goto out;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	if (status)
		atomic_inc(&tgtp->xmt_ls_rsp_error);
	else
		atomic_inc(&tgtp->xmt_ls_rsp_cmpl);

out:
	rsp = &ctxp->ctx.ls_req;

	lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n",
			 ctxp->oxid, status, result);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
			status, result, ctxp->oxid);

	lpfc_nlp_put(cmdwqe->context1);
	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	rsp->done(rsp);
	kfree(ctxp);
}

/**
 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
 * @phba: HBA the buffer is associated with.
 * @ctx_buf: NVMET context buffer to clean up and repost.
 *
 * Description: Cleans up the given receive context and returns it to the
 * NVMET context lists so it can be reused. If an IO is waiting for a
 * context, the freed context is immediately reused for that IO.
 *
 * Notes: Can be called with or without other locks held.
 *
 * Returns: None
 **/
void
lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct rqb_dmabuf *nvmebuf;
	uint32_t *payload;
	uint32_t size, oxid, sid, rc;
	unsigned long iflag;

	if (ctxp->txrdy) {
		pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
			      ctxp->txrdy_phys);
		ctxp->txrdy = NULL;
		ctxp->txrdy_phys = 0;
	}

	if (ctxp->state == LPFC_NVMET_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6411 NVMET free, already free IO x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}
	ctxp->state = LPFC_NVMET_STE_FREE;

	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
	if (phba->sli4_hba.nvmet_io_wait_cnt) {
		list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
				 nvmebuf, struct rqb_dmabuf,
				 hbuf.list);
		phba->sli4_hba.nvmet_io_wait_cnt--;
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
		oxid = be16_to_cpu(fc_hdr->fh_ox_id);
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		payload = (uint32_t *)(nvmebuf->dbuf.virt);
		size = nvmebuf->bytes_recv;
		sid = sli4_sid_from_fc_hdr(fc_hdr);

		ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
		ctxp->wqeq = NULL;
		ctxp->txrdy = NULL;
		ctxp->offset = 0;
		ctxp->phba = phba;
		ctxp->size = size;
		ctxp->oxid = oxid;
		ctxp->sid = sid;
		ctxp->state = LPFC_NVMET_STE_RCV;
		ctxp->entry_cnt = 1;
		ctxp->flag = 0;
		ctxp->ctxbuf = ctx_buf;
		spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (phba->ktime_on) {
			ctxp->ts_cmd_nvme = ktime_get_ns();
			ctxp->ts_isr_cmd = ctxp->ts_cmd_nvme;
			ctxp->ts_nvme_data = 0;
			ctxp->ts_data_wqput = 0;
			ctxp->ts_isr_data = 0;
			ctxp->ts_data_nvme = 0;
			ctxp->ts_nvme_status = 0;
			ctxp->ts_status_wqput = 0;
			ctxp->ts_isr_status = 0;
			ctxp->ts_status_nvme = 0;
		}
#endif
		atomic_inc(&tgtp->rcv_fcp_cmd_in);
		/*
		 * The calling sequence should be:
		 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
		 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
		 * When we return from nvmet_fc_rcv_fcp_req, all relevant info
		 * in the NVME command / FC header is stored.
		 * A buffer has already been reposted for this IO, so just free
		 * the nvmebuf.
		 */
		rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
					  payload, size);

		/* Process FCP command */
		if (rc == 0) {
			atomic_inc(&tgtp->rcv_fcp_cmd_out);
			nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
			return;
		}

		atomic_inc(&tgtp->rcv_fcp_cmd_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
				ctxp->oxid, rc,
				atomic_read(&tgtp->rcv_fcp_cmd_in),
				atomic_read(&tgtp->rcv_fcp_cmd_out),
				atomic_read(&tgtp->xmt_fcp_release));

		lpfc_nvmet_defer_release(phba, ctxp);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
		nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
		return;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);

	spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_put_lock, iflag);
	list_add_tail(&ctx_buf->list,
		      &phba->sli4_hba.lpfc_nvmet_ctx_put_list);
	phba->sli4_hba.nvmet_ctx_put_cnt++;
	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_put_lock, iflag);
#endif
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvmet_ktime(struct lpfc_hba *phba,
		 struct lpfc_nvmet_rcv_ctx *ctxp)
{
	uint64_t seg1, seg2, seg3, seg4, seg5;
	uint64_t seg6, seg7, seg8, seg9, seg10;

	if (!phba->ktime_on)
		return;

	if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
	    !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
	    !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
	    !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
	    !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
		return;

	if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
		return;
	if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
		return;
	if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
		return;
	if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
		return;
	if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
		return;
	if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
		return;
	if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
		return;
	if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
		return;
	if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
		return;
	/*
	 * Segment 1 - Time from FCP command received by MSI-X ISR
	 * to FCP command is passed to NVME Layer.
	 * Segment 2 - Time from FCP command payload handed
	 * off to NVME Layer to Driver receives a Command op
	 * from NVME Layer.
	 * Segment 3 - Time from Driver receives a Command op
	 * from NVME Layer to Command is put on WQ.
	 * Segment 4 - Time from Driver WQ put is done
	 * to MSI-X ISR for Command cmpl.
	 * Segment 5 - Time from MSI-X ISR for Command cmpl to
	 * Command cmpl is passed to NVME Layer.
	 * Segment 6 - Time from Command cmpl is passed to NVME
	 * Layer to Driver receives a RSP op from NVME Layer.
	 * Segment 7 - Time from Driver receives a RSP op from
	 * NVME Layer to WQ put is done on TRSP FCP Status.
	 * Segment 8 - Time from Driver WQ put is done on TRSP
	 * FCP Status to MSI-X ISR for TRSP cmpl.
	 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
	 * TRSP cmpl is passed to NVME Layer.
	 * Segment 10 - Time from FCP command received by
	 * MSI-X ISR to command is completed on wire.
	 * (Segments 1 thru 8) for READDATA / WRITEDATA
	 * (Segments 1 thru 4) for READDATA_RSP
	 */
	seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
	seg2 = (ctxp->ts_nvme_data - ctxp->ts_isr_cmd) - seg1;
	seg3 = (ctxp->ts_data_wqput - ctxp->ts_isr_cmd) -
		seg1 - seg2;
	seg4 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd) -
		seg1 - seg2 - seg3;
	seg5 = (ctxp->ts_data_nvme - ctxp->ts_isr_cmd) -
		seg1 - seg2 - seg3 - seg4;

	/* For auto rsp commands seg6 thru seg10 will be 0 */
	if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
		seg6 = (ctxp->ts_nvme_status -
			ctxp->ts_isr_cmd) -
			seg1 - seg2 - seg3 - seg4 - seg5;
		seg7 = (ctxp->ts_status_wqput -
			ctxp->ts_isr_cmd) -
			seg1 - seg2 - seg3 -
			seg4 - seg5 - seg6;
		seg8 = (ctxp->ts_isr_status -
			ctxp->ts_isr_cmd) -
			seg1 - seg2 - seg3 - seg4 -
			seg5 - seg6 - seg7;
		seg9 = (ctxp->ts_status_nvme -
			ctxp->ts_isr_cmd) -
			seg1 - seg2 - seg3 - seg4 -
			seg5 - seg6 - seg7 - seg8;
		seg10 = (ctxp->ts_isr_status -
			ctxp->ts_isr_cmd);
	} else {
		seg6 = 0;
		seg7 = 0;
		seg8 = 0;
		seg9 = 0;
		seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
	}

	phba->ktime_seg1_total += seg1;
	if (seg1 < phba->ktime_seg1_min)
		phba->ktime_seg1_min = seg1;
	else if (seg1 > phba->ktime_seg1_max)
		phba->ktime_seg1_max = seg1;

	phba->ktime_seg2_total += seg2;
	if (seg2 < phba->ktime_seg2_min)
		phba->ktime_seg2_min = seg2;
	else if (seg2 > phba->ktime_seg2_max)
		phba->ktime_seg2_max = seg2;

	phba->ktime_seg3_total += seg3;
	if (seg3 < phba->ktime_seg3_min)
		phba->ktime_seg3_min = seg3;
	else if (seg3 > phba->ktime_seg3_max)
		phba->ktime_seg3_max = seg3;

	phba->ktime_seg4_total += seg4;
	if (seg4 < phba->ktime_seg4_min)
		phba->ktime_seg4_min = seg4;
	else if (seg4 > phba->ktime_seg4_max)
		phba->ktime_seg4_max = seg4;

	phba->ktime_seg5_total += seg5;
	if (seg5 < phba->ktime_seg5_min)
		phba->ktime_seg5_min = seg5;
	else if (seg5 > phba->ktime_seg5_max)
		phba->ktime_seg5_max = seg5;

	phba->ktime_data_samples++;
	if (!seg6)
		goto out;

	phba->ktime_seg6_total += seg6;
	if (seg6 < phba->ktime_seg6_min)
		phba->ktime_seg6_min = seg6;
	else if (seg6 > phba->ktime_seg6_max)
		phba->ktime_seg6_max = seg6;

	phba->ktime_seg7_total += seg7;
	if (seg7 < phba->ktime_seg7_min)
		phba->ktime_seg7_min = seg7;
	else if (seg7 > phba->ktime_seg7_max)
		phba->ktime_seg7_max = seg7;

	phba->ktime_seg8_total += seg8;
	if (seg8 < phba->ktime_seg8_min)
		phba->ktime_seg8_min = seg8;
	else if (seg8 > phba->ktime_seg8_max)
		phba->ktime_seg8_max = seg8;

	phba->ktime_seg9_total += seg9;
	if (seg9 < phba->ktime_seg9_min)
		phba->ktime_seg9_min = seg9;
	else if (seg9 > phba->ktime_seg9_max)
		phba->ktime_seg9_max = seg9;
out:
	phba->ktime_seg10_total += seg10;
	if (seg10 < phba->ktime_seg10_min)
		phba->ktime_seg10_min = seg10;
	else if (seg10 > phba->ktime_seg10_max)
		phba->ktime_seg10_max = seg10;
	phba->ktime_status_samples++;
}
#endif

/**
 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. This function is the completion handler for NVME FCP commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_fcp_req *rsp;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t status, result, op, start_clean;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t id;
#endif

	ctxp = cmdwqe->context2;
	ctxp->flag &= ~LPFC_NVMET_IO_INP;

	rsp = &ctxp->ctx.fcp_req;
	op = rsp->op;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	if (phba->targetport)
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	else
		tgtp = NULL;

	lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
			 ctxp->oxid, op, status);

	if (status) {
		rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
		rsp->transferred_length = 0;
		if (tgtp)
			atomic_inc(&tgtp->xmt_fcp_rsp_error);

		/* pick up SLI4 exchange busy condition */
		if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
			ctxp->flag |= LPFC_NVMET_XBUSY;

			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
					"6315 IO Cmpl XBUSY: xri x%x: %x/%x\n",
					ctxp->oxid, status, result);
		} else {
			ctxp->flag &= ~LPFC_NVMET_XBUSY;
		}

	} else {
		rsp->fcp_error = NVME_SC_SUCCESS;
		if (op == NVMET_FCOP_RSP)
			rsp->transferred_length = rsp->rsplen;
		else
			rsp->transferred_length = rsp->transfer_length;
		if (tgtp)
			atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
	}

	if ((op == NVMET_FCOP_READDATA_RSP) ||
	    (op == NVMET_FCOP_RSP)) {
		/* Sanity check */
		ctxp->state = LPFC_NVMET_STE_DONE;
		ctxp->entry_cnt++;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (phba->ktime_on) {
			if (rsp->op == NVMET_FCOP_READDATA_RSP) {
				ctxp->ts_isr_data =
					cmdwqe->isr_timestamp;
				ctxp->ts_data_nvme =
					ktime_get_ns();
				ctxp->ts_nvme_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_wqput =
					ctxp->ts_data_nvme;
				ctxp->ts_isr_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_nvme =
					ctxp->ts_data_nvme;
			} else {
				ctxp->ts_isr_status =
					cmdwqe->isr_timestamp;
				ctxp->ts_status_nvme =
					ktime_get_ns();
			}
		}
		if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
			id = smp_processor_id();
			if (ctxp->cpu != id)
				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
						"6703 CPU Check cmpl: "
						"cpu %d expect %d\n",
						id, ctxp->cpu);
			if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
				phba->cpucheck_cmpl_io[id]++;
		}
#endif
		rsp->done(rsp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (phba->ktime_on)
			lpfc_nvmet_ktime(phba, ctxp);
#endif
		/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
	} else {
		ctxp->entry_cnt++;
		start_clean = offsetof(struct lpfc_iocbq, wqe);
		memset(((char *)cmdwqe) + start_clean, 0,
		       (sizeof(struct lpfc_iocbq) - start_clean));
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (phba->ktime_on) {
			ctxp->ts_isr_data = cmdwqe->isr_timestamp;
			ctxp->ts_data_nvme = ktime_get_ns();
		}
		if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
			id = smp_processor_id();
			if (ctxp->cpu != id)
				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
						"6704 CPU Check cmdcmpl: "
						"cpu %d expect %d\n",
						id, ctxp->cpu);
			if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
				phba->cpucheck_ccmpl_io[id]++;
		}
#endif
		rsp->done(rsp);
	}
}
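
/*
 * lpfc_nvmet_xmt_ls_rsp - Transmit an NVME LS response.
 *
 * Entry point for the nvmet_fc transport's .xmt_ls_rsp callback: prepares an
 * XMIT_SEQUENCE WQE for the LS response, posts it on the ELS work queue and
 * frees the receive buffer. If preparation or issue fails, the IO is dropped
 * and an abort is issued for the exchange.
 */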
static int
lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_tgt_ls_req *rsp)
{
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct hbq_dmabuf *nvmebuf =
		(struct hbq_dmabuf *)ctxp->rqb_buffer;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
	struct lpfc_dmabuf dmabuf;
	struct ulp_bde64 bpl;
	int rc;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);

	if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
	    (ctxp->entry_cnt != 1)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6412 NVMET LS rsp state mismatch "
				"oxid x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}
	ctxp->state = LPFC_NVMET_STE_LS_RSP;
	ctxp->entry_cnt++;

	nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
					  rsp->rsplen);
	if (nvmewqeq == NULL) {
		atomic_inc(&nvmep->xmt_ls_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6150 LS Drop IO x%x: Prep\n",
				ctxp->oxid);
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		atomic_inc(&nvmep->xmt_ls_abort);
		lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
						ctxp->sid, ctxp->oxid);
		return -ENOMEM;
	}

	/* Save numBdes for bpl2sgl */
	nvmewqeq->rsvd2 = 1;
	nvmewqeq->hba_wqidx = 0;
	nvmewqeq->context3 = &dmabuf;
	dmabuf.virt = &bpl;
	bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
	bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
	bpl.tus.f.bdeSize = rsp->rsplen;
	bpl.tus.f.bdeFlags = 0;
	bpl.tus.w = le32_to_cpu(bpl.tus.w);

	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
	nvmewqeq->iocb_cmpl = NULL;
	nvmewqeq->context2 = ctxp;

	lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n",
			 ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);

	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq);
	if (rc == WQE_SUCCESS) {
		/*
		 * Okay to repost buffer here, but wait till cmpl
		 * before freeing ctxp and iocbq.
		 */
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		ctxp->rqb_buffer = 0;
		atomic_inc(&nvmep->xmt_ls_rsp);
		return 0;
	}

	/* Give back resources */
	atomic_inc(&nvmep->xmt_ls_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6151 LS Drop IO x%x: Issue %d\n",
			ctxp->oxid, rc);

	lpfc_nlp_put(nvmewqeq->context1);

	lpfc_in_buf_free(phba, &nvmebuf->dbuf);
	atomic_inc(&nvmep->xmt_ls_abort);
	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
	return -ENXIO;
}

static int
lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct lpfc_iocbq *nvmewqeq;
	int rc;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->ktime_on) {
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_nvme_status = ktime_get_ns();
		else
			ctxp->ts_nvme_data = ktime_get_ns();
	}
	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
		int id = smp_processor_id();

		ctxp->cpu = id;
		if (id < LPFC_CHECK_CPU_CNT)
			phba->cpucheck_xmt_io[id]++;
		if (rsp->hwqid != id) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6705 CPU Check OP: "
					"cpu %d expect %d\n",
					id, rsp->hwqid);
			ctxp->cpu = rsp->hwqid;
		}
	}
#endif

	/* Sanity check */
	if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
	    (ctxp->state == LPFC_NVMET_STE_ABORT)) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6102 IO xri x%x aborted\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}

	nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
	if (nvmewqeq == NULL) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6152 FCP Drop IO x%x: Prep\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}

	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
	nvmewqeq->iocb_cmpl = NULL;
	nvmewqeq->context2 = ctxp;
	nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
	ctxp->wqeq->hba_wqidx = rsp->hwqid;

	lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
			 ctxp->oxid, rsp->op, rsp->rsplen);

	ctxp->flag |= LPFC_NVMET_IO_INP;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
	if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (!phba->ktime_on)
			return 0;
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_status_wqput = ktime_get_ns();
		else
			ctxp->ts_data_wqput = ktime_get_ns();
#endif
		return 0;
	}

	/* Give back resources */
	atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6153 FCP Drop IO x%x: Issue: %d\n",
			ctxp->oxid, rc);

	ctxp->wqeq->hba_wqidx = 0;
	nvmewqeq->context2 = NULL;
	nvmewqeq->context3 = NULL;
	rc = -EBUSY;
aerr:
	return rc;
}

static void
lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct lpfc_nvmet_tgtport *tport = targetport->private;

	/* release any threads waiting for the unreg to complete */
	complete(&tport->tport_unreg_done);
}

static void
lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			 struct nvmefc_tgt_fcp_req *req)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long flags;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
			ctxp->oxid, ctxp->flag, ctxp->state);

	lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
			 ctxp->oxid, ctxp->flag, ctxp->state);

	atomic_inc(&lpfc_nvmep->xmt_fcp_abort);

	spin_lock_irqsave(&ctxp->ctxlock, flags);

	/* Since iaab/iaar are NOT set, we need to check
	 * if the firmware is in the process of aborting the IO.
	 */
	if (ctxp->flag & LPFC_NVMET_XBUSY) {
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return;
	}
	ctxp->flag |= LPFC_NVMET_ABORT_OP;

	/* A state of LPFC_NVMET_STE_RCV means we have just received
	 * the NVME command and have not started processing it
	 * (by issuing any IO WQEs on this exchange yet).
	 */
	if (ctxp->state == LPFC_NVMET_STE_RCV)
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
	else
		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
					       ctxp->oxid);
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
}

static void
lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
			   struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long flags;
	bool aborting = false;

	if (ctxp->state != LPFC_NVMET_STE_DONE &&
	    ctxp->state != LPFC_NVMET_STE_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6413 NVMET release bad state %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
	}

	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
	    (ctxp->flag & LPFC_NVMET_XBUSY)) {
		aborting = true;
		/* let the abort path do the real release */
		lpfc_nvmet_defer_release(phba, ctxp);
	}
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n",
			 ctxp->oxid, ctxp->state, aborting);

	atomic_inc(&lpfc_nvmep->xmt_fcp_release);

	if (aborting)
		return;

	lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
}

static struct nvmet_fc_target_template lpfc_tgttemplate = {
	.targetport_delete = lpfc_nvmet_targetport_delete,
	.xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
	.fcp_op         = lpfc_nvmet_xmt_fcp_op,
	.fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
	.fcp_req_release = lpfc_nvmet_xmt_fcp_release,

	.max_hw_queues  = 1,
	.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.dma_boundary = 0xFFFFFFFF,

	/* optional features */
	.target_features = 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
};

static void
lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
	unsigned long flags;

	spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, flags);
	spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
			&phba->sli4_hba.lpfc_nvmet_ctx_get_list, list) {
		spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
		list_del_init(&ctx_buf->list);
		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
		__lpfc_clear_active_sglq(phba,
					 ctx_buf->sglq->sli4_lxritag);
		ctx_buf->sglq->state = SGL_FREED;
		ctx_buf->sglq->ndlp = NULL;

		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_add_tail(&ctx_buf->sglq->list,
			      &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);

		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
		kfree(ctx_buf->context);
	}
	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
			&phba->sli4_hba.lpfc_nvmet_ctx_put_list, list) {
		spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
		list_del_init(&ctx_buf->list);
		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
		__lpfc_clear_active_sglq(phba,
					 ctx_buf->sglq->sli4_lxritag);
		ctx_buf->sglq->state = SGL_FREED;
		ctx_buf->sglq->ndlp = NULL;

		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_add_tail(&ctx_buf->sglq->list,
			      &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);

		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
		kfree(ctx_buf->context);
	}
	spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, flags);
}

static int
lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctxbuf *ctx_buf;
	struct lpfc_iocbq *nvmewqe;
	union lpfc_wqe128 *wqe;
	int i;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6403 Allocate NVMET resources for %d XRIs\n",
			phba->sli4_hba.nvmet_xri_cnt);

	/* For all nvmet xris, allocate resources needed to process a
	 * received command on a per xri basis.
	 */
	for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
		ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
		if (!ctx_buf) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6404 Ran out of memory for NVMET\n");
			return -ENOMEM;
		}

		ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
					   GFP_KERNEL);
		if (!ctx_buf->context) {
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6405 Ran out of NVMET "
					"context memory\n");
			return -ENOMEM;
		}
		ctx_buf->context->ctxbuf = ctx_buf;
		ctx_buf->context->state = LPFC_NVMET_STE_FREE;

		ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
		if (!ctx_buf->iocbq) {
			kfree(ctx_buf->context);
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6406 Ran out of NVMET iocb/WQEs\n");
			return -ENOMEM;
		}
		ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
		nvmewqe = ctx_buf->iocbq;
		wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
		/* Initialize WQE */
		memset(wqe, 0, sizeof(union lpfc_wqe));
		/* Word 7 */
		bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
		bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
		bf_set(wqe_pu, &wqe->generic.wqe_com, 1);
		/* Word 10 */
		bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
		bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
		bf_set(wqe_qosd, &wqe->generic.wqe_com, 0);

		ctx_buf->iocbq->context1 = NULL;
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		if (!ctx_buf->sglq) {
			lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
			kfree(ctx_buf->context);
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6407 Ran out of NVMET XRIs\n");
			return -ENOMEM;
		}
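		/*
		 * Newly initialized context buffers are parked on the
		 * nvmet_ctx_get_list; the unsolicited FCP receive path
		 * pulls contexts from this list first.
		 */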
		spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
		list_add_tail(&ctx_buf->list,
			      &phba->sli4_hba.lpfc_nvmet_ctx_get_list);
		spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
	}
	phba->sli4_hba.nvmet_ctx_get_cnt = phba->sli4_hba.nvmet_xri_cnt;
	return 0;
}

int
lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmet_fc_port_info pinfo;
	int error;

	if (phba->targetport)
		return 0;

	error = lpfc_nvmet_setup_io_context(phba);
	if (error)
		return error;

	memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
	pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
	pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
	pinfo.port_id = vport->fc_myDID;

	/* Limit to LPFC_MAX_NVME_SEG_CNT.
	 * For now need + 1 to get around NVME transport logic.
	 */
	if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
				"6400 Reducing sg segment cnt to %d\n",
				LPFC_MAX_NVME_SEG_CNT);
		phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
	} else {
		phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
	}
	lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
	lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
	lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
					   NVMET_FCTGTFEAT_CMD_IN_ISR |
					   NVMET_FCTGTFEAT_OPDONE_IN_ISR;

#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
					     &phba->pcidev->dev,
					     &phba->targetport);
#else
	error = -ENOENT;
#endif
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6025 Cannot register NVME targetport "
				"x%x\n", error);
		phba->targetport = NULL;

		lpfc_nvmet_cleanup_io_context(phba);

	} else {
		tgtp = (struct lpfc_nvmet_tgtport *)
			phba->targetport->private;
		tgtp->phba = phba;

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
				"6026 Registered NVME "
				"targetport: %p, private %p "
				"portnm %llx nodenm %llx\n",
				phba->targetport, tgtp,
				pinfo.port_name, pinfo.node_name);

		atomic_set(&tgtp->rcv_ls_req_in, 0);
		atomic_set(&tgtp->rcv_ls_req_out, 0);
		atomic_set(&tgtp->rcv_ls_req_drop, 0);
		atomic_set(&tgtp->xmt_ls_abort, 0);
		atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
		atomic_set(&tgtp->xmt_ls_rsp, 0);
		atomic_set(&tgtp->xmt_ls_drop, 0);
		atomic_set(&tgtp->xmt_ls_rsp_error, 0);
		atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
		atomic_set(&tgtp->xmt_fcp_drop, 0);
		atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
		atomic_set(&tgtp->xmt_fcp_read, 0);
		atomic_set(&tgtp->xmt_fcp_write, 0);
		atomic_set(&tgtp->xmt_fcp_rsp, 0);
		atomic_set(&tgtp->xmt_fcp_release, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
		atomic_set(&tgtp->xmt_fcp_abort, 0);
		atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
		atomic_set(&tgtp->xmt_abort_unsol, 0);
		atomic_set(&tgtp->xmt_abort_sol, 0);
		atomic_set(&tgtp->xmt_abort_rsp, 0);
		atomic_set(&tgtp->xmt_abort_rsp_error, 0);
	}
	return error;
}
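
/*
 * lpfc_nvmet_update_targetport - Refresh the DID on the registered targetport.
 *
 * Pushes the physical port's current FC DID into the previously registered
 * nvmet_fc target port. Returns 0 whether or not a targetport is registered.
 */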
int
lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;

	if (!phba->targetport)
		return 0;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6007 Update NVMET port %p did x%x\n",
			 phba->targetport, vport->fc_myDID);

	phba->targetport->port_id = vport->fc_myDID;
	return 0;
}

/**
 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the nvmet xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVMET aborted xri.
 **/
void
lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
			    struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
	struct lpfc_nodelist *ndlp;
	unsigned long iflag = 0;
	int rrq_empty = 0;
	bool released = false;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6317 XB aborted xri x%x rxid x%x\n", xri, rxid);

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
		return;
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
	list_for_each_entry_safe(ctxp, next_ctxp,
				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 list) {
		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
			continue;

		/* Check if we already received a free context call
		 * and we have completed processing an abort situation.
		 */
		if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
		    !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
			list_del(&ctxp->list);
			released = true;
		}
		ctxp->flag &= ~LPFC_NVMET_XBUSY;
		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);

		rrq_empty = list_empty(&phba->active_rrq_list);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
		    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
		     ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
			lpfc_set_rrq_active(phba, ndlp,
					    ctxp->ctxbuf->sglq->sli4_lxritag,
					    rxid, 1);
			lpfc_sli4_abts_err_handler(phba, ndlp, axri);
		}

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6318 XB aborted oxid %x flg x%x (%x)\n",
				ctxp->oxid, ctxp->flag, released);
		if (released)
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

		if (rrq_empty)
			lpfc_worker_wake_up(phba);
		return;
	}
	spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

int
lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
			   struct fc_frame_header *fc_hdr)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
	struct nvmefc_tgt_fcp_req *rsp;
	uint16_t xri;
	unsigned long iflag = 0;

	xri = be16_to_cpu(fc_hdr->fh_ox_id);

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
	list_for_each_entry_safe(ctxp, next_ctxp,
				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 list) {
		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
			continue;

		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVMET_ABTS_RCV;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		lpfc_nvmeio_data(phba,
				 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
				 xri, smp_processor_id(), 0);

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6319 NVMET Rcv ABTS:acc xri x%x\n", xri);

		rsp = &ctxp->ctx.fcp_req;
		nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);

		/* Respond with BA_ACC accordingly */
		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
		return 0;
	}
	spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
			 xri, smp_processor_id(), 1);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);

	/* Respond with BA_RJT accordingly */
	lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
#endif
	return 0;
}

void
lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_tgtport *tgtp;

	if (phba->nvmet_support == 0)
		return;
	if (phba->targetport) {
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		init_completion(&tgtp->tport_unreg_done);
		nvmet_fc_unregister_targetport(phba->targetport);
		wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
		lpfc_nvmet_cleanup_io_context(phba);
	}
	phba->targetport = NULL;
#endif
}

/**
 * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
 *
 * This routine processes an unsolicited NVME LS request received on a SLI
 * ring. It allocates a receive context for the exchange and hands the LS
 * payload to the nvmet_fc transport via nvmet_fc_rcv_ls_req(). If the
 * request cannot be delivered, the buffer is freed and an abort is issued
 * for the exchange.
 **/
static void
lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct hbq_dmabuf *nvmebuf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t *payload;
	uint32_t size, oxid, sid, rc;

	if (!nvmebuf || !phba->targetport) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6154 LS Drop IO\n");
		oxid = 0;
		size = 0;
		sid = 0;
		ctxp = NULL;
		goto dropit;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	payload = (uint32_t *)(nvmebuf->dbuf.virt);
	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
	size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	sid = sli4_sid_from_fc_hdr(fc_hdr);

	ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
	if (ctxp == NULL) {
		atomic_inc(&tgtp->rcv_ls_req_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6155 LS Drop IO x%x: Alloc\n",
				oxid);
dropit:
		lpfc_nvmeio_data(phba, "NVMET LS DROP: "
				 "xri x%x sz %d from %06x\n",
				 oxid, size, sid);
		if (nvmebuf)
			lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		return;
	}
	ctxp->phba = phba;
	ctxp->size = size;
	ctxp->oxid = oxid;
	ctxp->sid = sid;
	ctxp->wqeq = NULL;
	ctxp->state = LPFC_NVMET_STE_LS_RCV;
	ctxp->entry_cnt = 1;
	ctxp->rqb_buffer = (void *)nvmebuf;

	lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
			 oxid, size, sid);
	/*
	 * The calling sequence should be:
	 * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done
	 * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
	 */
	atomic_inc(&tgtp->rcv_ls_req_in);
	rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
				 payload, size);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
			"%08x %08x %08x\n", size, rc,
			*payload, *(payload+1), *(payload+2),
			*(payload+3), *(payload+4), *(payload+5));

	if (rc == 0) {
		atomic_inc(&tgtp->rcv_ls_req_out);
		return;
	}

	lpfc_nvmeio_data(phba, "NVMET LS DROP: xri x%x sz %d from %06x\n",
			 oxid, size, sid);

	atomic_inc(&tgtp->rcv_ls_req_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
			ctxp->oxid, rc);

	/* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
	if (nvmebuf)
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);

	atomic_inc(&tgtp->xmt_ls_abort);
	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
#endif
}

/**
 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to lpfc nvme command RQ data structure.
 * @isr_timestamp: ISR entry timestamp, used for latency accounting.
 *
 * This routine processes an unsolicited NVME FCP command received on a SLI
 * ring. It pulls a free receive context from the NVMET context lists and
 * hands the command payload to the nvmet_fc transport via
 * nvmet_fc_rcv_fcp_req(). If no context is available, the command is queued
 * on the IO wait list until a context is released.
 **/
static void
lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
			    struct lpfc_sli_ring *pring,
			    struct rqb_dmabuf *nvmebuf,
			    uint64_t isr_timestamp)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_ctxbuf *ctx_buf;
	uint32_t *payload;
	uint32_t size, oxid, sid, rc, qno;
	unsigned long iflag;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t id;
#endif

	ctx_buf = NULL;
	if (!nvmebuf || !phba->targetport) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6157 NVMET FCP Drop IO\n");
		oxid = 0;
		size = 0;
		sid = 0;
		ctxp = NULL;
		goto dropit;
	}

	spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, iflag);
	if (phba->sli4_hba.nvmet_ctx_get_cnt) {
		list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_get_list,
				 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
		phba->sli4_hba.nvmet_ctx_get_cnt--;
	} else {
		spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
		if (phba->sli4_hba.nvmet_ctx_put_cnt) {
			list_splice(&phba->sli4_hba.lpfc_nvmet_ctx_put_list,
				    &phba->sli4_hba.lpfc_nvmet_ctx_get_list);
			INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_put_list);
			phba->sli4_hba.nvmet_ctx_get_cnt =
				phba->sli4_hba.nvmet_ctx_put_cnt;
			phba->sli4_hba.nvmet_ctx_put_cnt = 0;
			spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);

			list_remove_head(
				&phba->sli4_hba.lpfc_nvmet_ctx_get_list,
				ctx_buf, struct lpfc_nvmet_ctxbuf, list);
			phba->sli4_hba.nvmet_ctx_get_cnt--;
		} else {
			spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
		}
	}
	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, iflag);

	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	size = nvmebuf->bytes_recv;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
		id = smp_processor_id();
		if (id < LPFC_CHECK_CPU_CNT)
			phba->cpucheck_rcv_io[id]++;
	}
#endif

	lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
			 oxid, size, smp_processor_id());

	if (!ctx_buf) {
		/* Queue this NVME IO to process later */
		spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
		list_add_tail(&nvmebuf->hbuf.list,
			      &phba->sli4_hba.lpfc_nvmet_io_wait_list);
		phba->sli4_hba.nvmet_io_wait_cnt++;
		phba->sli4_hba.nvmet_io_wait_total++;
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		/* Post a brand new DMA buffer to RQ */
		qno = nvmebuf->idx;
		lpfc_post_rq_buffer(
			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
		return;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	payload = (uint32_t *)(nvmebuf->dbuf.virt);
	sid = sli4_sid_from_fc_hdr(fc_hdr);

	ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
	if (ctxp->state != LPFC_NVMET_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6414 NVMET Context corrupt %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
	}
	ctxp->wqeq = NULL;
	ctxp->txrdy = NULL;
	ctxp->offset = 0;
	ctxp->phba = phba;
	ctxp->size = size;
	ctxp->oxid = oxid;
	ctxp->sid = sid;
	ctxp->state = LPFC_NVMET_STE_RCV;
	ctxp->entry_cnt = 1;
	ctxp->flag = 0;
	ctxp->ctxbuf = ctx_buf;
	spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->ktime_on) {
		ctxp->ts_isr_cmd = isr_timestamp;
		ctxp->ts_cmd_nvme = ktime_get_ns();
		ctxp->ts_nvme_data = 0;
		ctxp->ts_data_wqput = 0;
		ctxp->ts_isr_data = 0;
		ctxp->ts_data_nvme = 0;
		ctxp->ts_nvme_status = 0;
		ctxp->ts_status_wqput = 0;
		ctxp->ts_isr_status = 0;
		ctxp->ts_status_nvme = 0;
	}
#endif

	atomic_inc(&tgtp->rcv_fcp_cmd_in);
	/*
	 * The calling sequence should be:
	 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
	 * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
	 * the NVME command / FC header is stored, so we are free to repost
	 * the buffer.
	 */
	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
				  payload, size);

	/* Process FCP command */
	if (rc == 0) {
		atomic_inc(&tgtp->rcv_fcp_cmd_out);
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
		return;
	}

	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
			ctxp->oxid, rc,
			atomic_read(&tgtp->rcv_fcp_cmd_in),
			atomic_read(&tgtp->rcv_fcp_cmd_out),
			atomic_read(&tgtp->xmt_fcp_release));
dropit:
	lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
			 oxid, size, sid);
	if (oxid) {
		lpfc_nvmet_defer_release(phba, ctxp);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
		return;
	}

	if (ctx_buf)
		lpfc_nvmet_ctxbuf_post(phba, ctx_buf);

	if (nvmebuf)
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
#endif
}

/**
 * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @piocb: pointer to the driver iocb carrying the received nvme LS buffer.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from the
 * SLI RQ on which the unsolicited event was received.
 **/
void
lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_iocbq *piocb)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *nvmebuf;

	d_buf = piocb->context2;
	nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);

	if (phba->nvmet_support == 0) {
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		return;
	}
	lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
}

/**
 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to received nvme data structure.
 * @isr_timestamp: ISR entry timestamp, used for latency accounting.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring.
 * The actual processing of the data buffer associated with the unsolicited
 * event is done by invoking the routine lpfc_nvmet_unsol_fcp_buffer() after
 * properly setting up the buffer from the SLI RQ on which the unsolicited
 * event was received.
 **/
void
lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
			   struct lpfc_sli_ring *pring,
			   struct rqb_dmabuf *nvmebuf,
			   uint64_t isr_timestamp)
{
	if (phba->nvmet_support == 0) {
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
		return;
	}
	lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf,
				    isr_timestamp);
}

/**
 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
 * @phba: pointer to a host N_Port data structure.
 * @ctxp: Context info for NVME LS Request
 * @rspbuf: DMA buffer of NVME command.
 * @rspsize: size of the NVME command.
 *
 * This routine allocates a lpfc-WQE data structure from the driver lpfc-WQE
 * free-list and prepares an XMIT_SEQUENCE WQE that carries the NVME LS
 * response described by the parameters passed into the routine. It fills in
 * the Buffer Descriptor Entry (BDE) for the response payload. The reference
 * count on the ndlp is incremented by 1 and the reference to the ndlp is put
 * into context1 of the WQE data structure for this WQE to hold the ndlp
 * reference for the command's callback function to access later.
 *
 * Return code
 *   Pointer to the newly allocated/prepared nvme wqe data structure
 *   NULL - when nvme wqe data structure allocation/preparation failed
 **/
static struct lpfc_iocbq *
lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
		       struct lpfc_nvmet_rcv_ctx *ctxp,
		       dma_addr_t rspbuf, uint16_t rspsize)
{
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *nvmewqe;
	union lpfc_wqe *wqe;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6104 NVMET prep LS wqe: link err: "
				"NPORT x%x oxid:x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	/* Allocate buffer for command wqe */
	nvmewqe = lpfc_sli_get_iocbq(phba);
	if (nvmewqe == NULL) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6105 NVMET prep LS wqe: No WQE: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6106 NVMET prep LS wqe: No ndlp: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		goto nvme_wqe_free_wqeq_exit;
	}
	ctxp->wqeq = nvmewqe;

	/* prevent preparing wqe with NULL ndlp reference */
	nvmewqe->context1 = lpfc_nlp_get(ndlp);
	if (nvmewqe->context1 == NULL)
		goto nvme_wqe_free_wqeq_exit;
	nvmewqe->context2 = ctxp;

	wqe = &nvmewqe->wqe;
	memset(wqe, 0, sizeof(union lpfc_wqe));

	/* Words 0 - 2 */
	wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
	wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
	wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));

	/* Word 3 */

	/* Word 4 */

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
	bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);

	/* Word 8 */
	wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
	bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
	       LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
	       OTHER_COMMAND);

	/* Word 12 */
	wqe->xmit_sequence.xmit_len = rspsize;

	nvmewqe->retry = 1;
	nvmewqe->vport = phba->pport;
	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
	nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;

	/* Xmit NVMET response to remote NPORT <did> */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6039 Xmit NVMET LS response to remote "
			"NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
			ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
			rspsize);
	return nvmewqe;

nvme_wqe_free_wqeq_exit:
	nvmewqe->context2 = NULL;
	nvmewqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, nvmewqe);
	return NULL;
}


static struct lpfc_iocbq *
lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
			struct lpfc_nvmet_rcv_ctx *ctxp)
{
	struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
	struct lpfc_nvmet_tgtport *tgtp;
	struct sli4_sge *sgl;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *nvmewqe;
	struct scatterlist *sgel;
	union lpfc_wqe128 *wqe;
	uint32_t *txrdy;
	dma_addr_t physaddr;
	int i, cnt;
	int xc = 1;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6107 NVMET prep FCP wqe: link err:"
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6108 NVMET prep FCP wqe: no ndlp: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	if (rsp->sg_cnt > phba->cfg_nvme_seg_cnt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6109 NVMET prep FCP wqe: seg cnt err: "
				"NPORT x%x oxid x%x ste %d cnt %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state,
				phba->cfg_nvme_seg_cnt);
		return NULL;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	nvmewqe = ctxp->wqeq;
	if (nvmewqe == NULL) {
		/* Allocate buffer for command wqe */
		nvmewqe = ctxp->ctxbuf->iocbq;
		if (nvmewqe == NULL) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6110 NVMET prep FCP wqe: No "
					"WQE: NPORT x%x oxid x%x ste %d\n",
					ctxp->sid, ctxp->oxid, ctxp->state);
			return NULL;
		}
		ctxp->wqeq = nvmewqe;
		xc = 0; /* create new XRI */
		nvmewqe->sli4_lxritag = NO_XRI;
		nvmewqe->sli4_xritag = NO_XRI;
	}

	/* Sanity check */
	if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
	    (ctxp->entry_cnt == 1)) ||
	    (ctxp->state == LPFC_NVMET_STE_DATA)) {
		wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6111 Wrong state NVMET FCP: %d cnt %d\n",
				ctxp->state, ctxp->entry_cnt);
		return NULL;
	}

	sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
	switch (rsp->op) {
	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		/* Words 0 - 2 : The first sg segment */
		sgel = &rsp->sg[0];
		physaddr = sg_dma_address(sgel);
		wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
		wqe->fcp_tsend.bde.addrLow =
			cpu_to_le32(putPaddrLow(physaddr));
cpu_to_le32(putPaddrLow(physaddr)); 1818 wqe->fcp_tsend.bde.addrHigh = 1819 cpu_to_le32(putPaddrHigh(physaddr)); 1820 1821 /* Word 3 */ 1822 wqe->fcp_tsend.payload_offset_len = 0; 1823 1824 /* Word 4 */ 1825 wqe->fcp_tsend.relative_offset = ctxp->offset; 1826 1827 /* Word 5 */ 1828 1829 /* Word 6 */ 1830 bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com, 1831 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 1832 bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com, 1833 nvmewqe->sli4_xritag); 1834 1835 /* Word 7 */ 1836 bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE); 1837 1838 /* Word 8 */ 1839 wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag; 1840 1841 /* Word 9 */ 1842 bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag); 1843 bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid); 1844 1845 /* Word 10 */ 1846 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1); 1847 bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1); 1848 bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE); 1849 bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, 1850 LPFC_WQE_LENLOC_WORD12); 1851 bf_set(wqe_ebde_cnt, &wqe->fcp_tsend.wqe_com, 0); 1852 bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, xc); 1853 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1); 1854 if (phba->cfg_nvme_oas) 1855 bf_set(wqe_oas, &wqe->fcp_tsend.wqe_com, 1); 1856 1857 /* Word 11 */ 1858 bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, 1859 LPFC_WQE_CQ_ID_DEFAULT); 1860 bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, 1861 FCP_COMMAND_TSEND); 1862 1863 /* Word 12 */ 1864 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length; 1865 1866 /* Setup 2 SKIP SGEs */ 1867 sgl->addr_hi = 0; 1868 sgl->addr_lo = 0; 1869 sgl->word2 = 0; 1870 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); 1871 sgl->word2 = cpu_to_le32(sgl->word2); 1872 sgl->sge_len = 0; 1873 sgl++; 1874 sgl->addr_hi = 0; 1875 sgl->addr_lo = 0; 1876 sgl->word2 = 0; 1877 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); 1878 sgl->word2 = cpu_to_le32(sgl->word2); 1879 sgl->sge_len = 0; 1880 sgl++; 1881 if (rsp->op == NVMET_FCOP_READDATA_RSP) { 1882 atomic_inc(&tgtp->xmt_fcp_read_rsp); 1883 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1); 1884 if ((ndlp->nlp_flag & NLP_SUPPRESS_RSP) && 1885 (rsp->rsplen == 12)) { 1886 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 1); 1887 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0); 1888 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0); 1889 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0); 1890 } else { 1891 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0); 1892 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1); 1893 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1); 1894 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 1895 ((rsp->rsplen >> 2) - 1)); 1896 memcpy(&wqe->words[16], rsp->rspaddr, 1897 rsp->rsplen); 1898 } 1899 } else { 1900 atomic_inc(&tgtp->xmt_fcp_read); 1901 1902 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0); 1903 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0); 1904 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0); 1905 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0); 1906 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0); 1907 } 1908 break; 1909 1910 case NVMET_FCOP_WRITEDATA: 1911 /* Words 0 - 2 : The first sg segment */ 1912 txrdy = pci_pool_alloc(phba->txrdy_payload_pool, 1913 GFP_KERNEL, &physaddr); 1914 if (!txrdy) { 1915 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1916 "6041 Bad txrdy buffer: oxid x%x\n", 1917 ctxp->oxid); 1918 return NULL; 1919 } 1920 ctxp->txrdy = txrdy; 1921 ctxp->txrdy_phys = physaddr; 1922 wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1923 
wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN; 1924 wqe->fcp_treceive.bde.addrLow = 1925 cpu_to_le32(putPaddrLow(physaddr)); 1926 wqe->fcp_treceive.bde.addrHigh = 1927 cpu_to_le32(putPaddrHigh(physaddr)); 1928 1929 /* Word 3 */ 1930 wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN; 1931 1932 /* Word 4 */ 1933 wqe->fcp_treceive.relative_offset = ctxp->offset; 1934 1935 /* Word 5 */ 1936 1937 /* Word 6 */ 1938 bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com, 1939 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 1940 bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com, 1941 nvmewqe->sli4_xritag); 1942 1943 /* Word 7 */ 1944 bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0); 1945 bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, 1946 CMD_FCP_TRECEIVE64_WQE); 1947 1948 /* Word 8 */ 1949 wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag; 1950 1951 /* Word 9 */ 1952 bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag); 1953 bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid); 1954 1955 /* Word 10 */ 1956 bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1); 1957 bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1); 1958 bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ); 1959 bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, 1960 LPFC_WQE_LENLOC_WORD12); 1961 bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, xc); 1962 bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0); 1963 bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0); 1964 bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0); 1965 bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1); 1966 if (phba->cfg_nvme_oas) 1967 bf_set(wqe_oas, &wqe->fcp_treceive.wqe_com, 1); 1968 1969 /* Word 11 */ 1970 bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, 1971 LPFC_WQE_CQ_ID_DEFAULT); 1972 bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, 1973 FCP_COMMAND_TRECEIVE); 1974 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0); 1975 1976 /* Word 12 */ 1977 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length; 1978 1979 /* Setup 1 TXRDY and 1 SKIP SGE */ 1980 txrdy[0] = 0; 1981 txrdy[1] = cpu_to_be32(rsp->transfer_length); 1982 txrdy[2] = 0; 1983 1984 sgl->addr_hi = putPaddrHigh(physaddr); 1985 sgl->addr_lo = putPaddrLow(physaddr); 1986 sgl->word2 = 0; 1987 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); 1988 sgl->word2 = cpu_to_le32(sgl->word2); 1989 sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN); 1990 sgl++; 1991 sgl->addr_hi = 0; 1992 sgl->addr_lo = 0; 1993 sgl->word2 = 0; 1994 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); 1995 sgl->word2 = cpu_to_le32(sgl->word2); 1996 sgl->sge_len = 0; 1997 sgl++; 1998 atomic_inc(&tgtp->xmt_fcp_write); 1999 break; 2000 2001 case NVMET_FCOP_RSP: 2002 /* Words 0 - 2 */ 2003 physaddr = rsp->rspdma; 2004 wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; 2005 wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen; 2006 wqe->fcp_trsp.bde.addrLow = 2007 cpu_to_le32(putPaddrLow(physaddr)); 2008 wqe->fcp_trsp.bde.addrHigh = 2009 cpu_to_le32(putPaddrHigh(physaddr)); 2010 2011 /* Word 3 */ 2012 wqe->fcp_trsp.response_len = rsp->rsplen; 2013 2014 /* Word 4 */ 2015 wqe->fcp_trsp.rsvd_4_5[0] = 0; 2016 2017 2018 /* Word 5 */ 2019 2020 /* Word 6 */ 2021 bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com, 2022 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 2023 bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com, 2024 nvmewqe->sli4_xritag); 2025 2026 /* Word 7 */ 2027 bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); 2028 bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE); 2029 2030 /* Word 8 */ 2031 wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag; 2032 2033 /* Word 9 */ 
		bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);

		/* Word 10 */
		bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
		bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 0);
		bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com,
		       LPFC_WQE_LENLOC_WORD3);
		bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, xc);
		bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
		if (phba->cfg_nvme_oas)
			bf_set(wqe_oas, &wqe->fcp_trsp.wqe_com, 1);

		/* Word 11 */
		bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com,
		       LPFC_WQE_CQ_ID_DEFAULT);
		bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com,
		       FCP_COMMAND_TRSP);
		bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);

		if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
			/* Good response - all zero's on wire */
			bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
			bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
			bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
		} else {
			bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
			bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
			bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
			       ((rsp->rsplen >> 2) - 1));
			memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
		}

		/* Use rspbuf, NOT sg list */
		rsp->sg_cnt = 0;
		sgl->word2 = 0;
		atomic_inc(&tgtp->xmt_fcp_rsp);
		break;

	default:
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6064 Unknown Rsp Op %d\n",
				rsp->op);
		return NULL;
	}

	nvmewqe->retry = 1;
	nvmewqe->vport = phba->pport;
	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
	nvmewqe->context1 = ndlp;

	for (i = 0; i < rsp->sg_cnt; i++) {
		sgel = &rsp->sg[i];
		physaddr = sg_dma_address(sgel);
		cnt = sg_dma_len(sgel);
		sgl->addr_hi = putPaddrHigh(physaddr);
		sgl->addr_lo = putPaddrLow(physaddr);
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
		bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
		if ((i + 1) == rsp->sg_cnt)
			bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(cnt);
		sgl++;
		ctxp->offset += cnt;
	}
	ctxp->state = LPFC_NVMET_STE_DATA;
	ctxp->entry_cnt++;
	return nvmewqe;
}

/**
 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no lock
 * held. It is the completion handler for an NVME ABTS issued for an FCP
 * command and frees the memory resources used for the command.
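 * It also releases the iocbq that carried the ABTS and, when the
 * transport has already released the IO context, reposts the context
 * buffer for reuse.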
 **/
static void
lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, result;
	unsigned long flags;
	bool released = false;

	ctxp = cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (ctxp->flag & LPFC_NVMET_ABORT_OP)
		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

	ctxp->state = LPFC_NVMET_STE_DONE;

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
		list_del(&ctxp->list);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	atomic_inc(&tgtp->xmt_abort_rsp);

	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6165 ABORT cmpl: xri x%x flg x%x (%d) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	/*
	 * if transport has released ctx, then can reuse it. Otherwise,
	 * will be recycled by transport release call.
	 */
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

	/* This is the iocbq for the abort, not the command */
	lpfc_sli_release_iocbq(phba, cmdwqe);

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}

/**
 * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no lock
 * held. It is the completion handler for an NVME ABTS issued for an FCP
 * command and frees the memory resources used for the command.
 **/
static void
lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			       struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	unsigned long flags;
	uint32_t status, result;
	bool released = false;

	ctxp = cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	if (!ctxp) {
		/* if context is clear, the related IO already completed */
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
				wcqe->word0, wcqe->total_data_placed,
				result, wcqe->word3);
		return;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (ctxp->flag & LPFC_NVMET_ABORT_OP)
		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

	/* Sanity check */
	if (ctxp->state != LPFC_NVMET_STE_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6112 ABTS Wrong state:%d oxid x%x\n",
				ctxp->state, ctxp->oxid);
	}

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
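	 * If the transport has already released the context
	 * (LPFC_NVMET_CTX_RLS set) and the exchange is no longer busy
	 * (LPFC_NVMET_XBUSY clear), unlink it here so the context buffer
	 * can be reposted once the lock is dropped.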
	 */
	ctxp->state = LPFC_NVMET_STE_DONE;
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
		list_del(&ctxp->list);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	atomic_inc(&tgtp->xmt_abort_rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6316 ABTS cmpl xri x%x flg x%x (%x) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	/*
	 * if transport has released ctx, then can reuse it. Otherwise,
	 * will be recycled by transport release call.
	 */
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}

/**
 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no lock
 * held. It is the completion handler for an NVME ABTS issued for an LS
 * command and frees the memory resources used for the command.
 **/
static void
lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			    struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, result;

	ctxp = cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	atomic_inc(&tgtp->xmt_ls_abort_cmpl);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n",
			ctxp, wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	if (!ctxp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6415 NVMET LS Abort No ctx: WCQE: "
				"%08x %08x %08x %08x\n",
				wcqe->word0, wcqe->total_data_placed,
				result, wcqe->word3);

		lpfc_sli_release_iocbq(phba, cmdwqe);
		return;
	}

	if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6416 NVMET LS abort cmpl state mismatch: "
				"oxid x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	kfree(ctxp);
}

static int
lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
			     struct lpfc_nvmet_rcv_ctx *ctxp,
			     uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe *wqe_abts;
	struct lpfc_nodelist *ndlp;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6067 ABTS: sid %x xri x%x/x%x\n",
			sid, xri, ctxp->wqeq->sli4_xritag);

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state !=
NLP_STE_MAPPED_NODE))) { 2329 atomic_inc(&tgtp->xmt_abort_rsp_error); 2330 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 2331 "6134 Drop ABTS - wrong NDLP state x%x.\n", 2332 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE); 2333 2334 /* No failure to an ABTS request. */ 2335 return 0; 2336 } 2337 2338 abts_wqeq = ctxp->wqeq; 2339 wqe_abts = &abts_wqeq->wqe; 2340 2341 /* 2342 * Since we zero the whole WQE, we need to ensure we set the WQE fields 2343 * that were initialized in lpfc_sli4_nvmet_alloc. 2344 */ 2345 memset(wqe_abts, 0, sizeof(union lpfc_wqe)); 2346 2347 /* Word 5 */ 2348 bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0); 2349 bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1); 2350 bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0); 2351 bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS); 2352 bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS); 2353 2354 /* Word 6 */ 2355 bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com, 2356 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 2357 bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com, 2358 abts_wqeq->sli4_xritag); 2359 2360 /* Word 7 */ 2361 bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com, 2362 CMD_XMIT_SEQUENCE64_WQE); 2363 bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI); 2364 bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3); 2365 bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0); 2366 2367 /* Word 8 */ 2368 wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag; 2369 2370 /* Word 9 */ 2371 bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag); 2372 /* Needs to be set by caller */ 2373 bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri); 2374 2375 /* Word 10 */ 2376 bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1); 2377 bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE); 2378 bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com, 2379 LPFC_WQE_LENLOC_WORD12); 2380 bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0); 2381 bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0); 2382 2383 /* Word 11 */ 2384 bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com, 2385 LPFC_WQE_CQ_ID_DEFAULT); 2386 bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com, 2387 OTHER_COMMAND); 2388 2389 abts_wqeq->vport = phba->pport; 2390 abts_wqeq->context1 = ndlp; 2391 abts_wqeq->context2 = ctxp; 2392 abts_wqeq->context3 = NULL; 2393 abts_wqeq->rsvd2 = 0; 2394 /* hba_wqidx should already be setup from command we are aborting */ 2395 abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR; 2396 abts_wqeq->iocb.ulpLe = 1; 2397 2398 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2399 "6069 Issue ABTS to xri x%x reqtag x%x\n", 2400 xri, abts_wqeq->iotag); 2401 return 1; 2402 } 2403 2404 static int 2405 lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, 2406 struct lpfc_nvmet_rcv_ctx *ctxp, 2407 uint32_t sid, uint16_t xri) 2408 { 2409 struct lpfc_nvmet_tgtport *tgtp; 2410 struct lpfc_iocbq *abts_wqeq; 2411 union lpfc_wqe *abts_wqe; 2412 struct lpfc_nodelist *ndlp; 2413 unsigned long flags; 2414 int rc; 2415 2416 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 2417 if (!ctxp->wqeq) { 2418 ctxp->wqeq = ctxp->ctxbuf->iocbq; 2419 ctxp->wqeq->hba_wqidx = 0; 2420 } 2421 2422 ndlp = lpfc_findnode_did(phba->pport, sid); 2423 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || 2424 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 2425 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { 2426 atomic_inc(&tgtp->xmt_abort_rsp_error); 2427 lpfc_printf_log(phba, 
KERN_ERR, LOG_NVME_ABTS, 2428 "6160 Drop ABORT - wrong NDLP state x%x.\n", 2429 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE); 2430 2431 /* No failure to an ABTS request. */ 2432 ctxp->flag &= ~LPFC_NVMET_ABORT_OP; 2433 return 0; 2434 } 2435 2436 /* Issue ABTS for this WQE based on iotag */ 2437 ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba); 2438 if (!ctxp->abort_wqeq) { 2439 atomic_inc(&tgtp->xmt_abort_rsp_error); 2440 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 2441 "6161 ABORT failed: No wqeqs: " 2442 "xri: x%x\n", ctxp->oxid); 2443 /* No failure to an ABTS request. */ 2444 ctxp->flag &= ~LPFC_NVMET_ABORT_OP; 2445 return 0; 2446 } 2447 abts_wqeq = ctxp->abort_wqeq; 2448 abts_wqe = &abts_wqeq->wqe; 2449 ctxp->state = LPFC_NVMET_STE_ABORT; 2450 2451 /* Announce entry to new IO submit field. */ 2452 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2453 "6162 ABORT Request to rport DID x%06x " 2454 "for xri x%x x%x\n", 2455 ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag); 2456 2457 /* If the hba is getting reset, this flag is set. It is 2458 * cleared when the reset is complete and rings reestablished. 2459 */ 2460 spin_lock_irqsave(&phba->hbalock, flags); 2461 /* driver queued commands are in process of being flushed */ 2462 if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) { 2463 spin_unlock_irqrestore(&phba->hbalock, flags); 2464 atomic_inc(&tgtp->xmt_abort_rsp_error); 2465 lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 2466 "6163 Driver in reset cleanup - flushing " 2467 "NVME Req now. hba_flag x%x oxid x%x\n", 2468 phba->hba_flag, ctxp->oxid); 2469 lpfc_sli_release_iocbq(phba, abts_wqeq); 2470 ctxp->flag &= ~LPFC_NVMET_ABORT_OP; 2471 return 0; 2472 } 2473 2474 /* Outstanding abort is in progress */ 2475 if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) { 2476 spin_unlock_irqrestore(&phba->hbalock, flags); 2477 atomic_inc(&tgtp->xmt_abort_rsp_error); 2478 lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 2479 "6164 Outstanding NVME I/O Abort Request " 2480 "still pending on oxid x%x\n", 2481 ctxp->oxid); 2482 lpfc_sli_release_iocbq(phba, abts_wqeq); 2483 ctxp->flag &= ~LPFC_NVMET_ABORT_OP; 2484 return 0; 2485 } 2486 2487 /* Ready - mark outstanding as aborted by driver. */ 2488 abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED; 2489 2490 /* WQEs are reused. Clear stale data and set key fields to 2491 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag. 2492 */ 2493 memset(abts_wqe, 0, sizeof(union lpfc_wqe)); 2494 2495 /* word 3 */ 2496 bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG); 2497 2498 /* word 7 */ 2499 bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0); 2500 bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); 2501 2502 /* word 8 - tell the FW to abort the IO associated with this 2503 * outstanding exchange ID. 2504 */ 2505 abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag; 2506 2507 /* word 9 - this is the iotag for the abts_wqe completion. 
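	 * It identifies this abort WQE itself; the abort_tag set in word 8
	 * above names the XRI of the exchange being aborted.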
*/ 2508 bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com, 2509 abts_wqeq->iotag); 2510 2511 /* word 10 */ 2512 bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1); 2513 bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); 2514 2515 /* word 11 */ 2516 bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND); 2517 bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1); 2518 bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 2519 2520 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 2521 abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx; 2522 abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp; 2523 abts_wqeq->iocb_cmpl = 0; 2524 abts_wqeq->iocb_flag |= LPFC_IO_NVME; 2525 abts_wqeq->context2 = ctxp; 2526 abts_wqeq->vport = phba->pport; 2527 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq); 2528 spin_unlock_irqrestore(&phba->hbalock, flags); 2529 if (rc == WQE_SUCCESS) { 2530 atomic_inc(&tgtp->xmt_abort_sol); 2531 return 0; 2532 } 2533 2534 atomic_inc(&tgtp->xmt_abort_rsp_error); 2535 ctxp->flag &= ~LPFC_NVMET_ABORT_OP; 2536 lpfc_sli_release_iocbq(phba, abts_wqeq); 2537 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 2538 "6166 Failed ABORT issue_wqe with status x%x " 2539 "for oxid x%x.\n", 2540 rc, ctxp->oxid); 2541 return 1; 2542 } 2543 2544 2545 static int 2546 lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, 2547 struct lpfc_nvmet_rcv_ctx *ctxp, 2548 uint32_t sid, uint16_t xri) 2549 { 2550 struct lpfc_nvmet_tgtport *tgtp; 2551 struct lpfc_iocbq *abts_wqeq; 2552 unsigned long flags; 2553 int rc; 2554 2555 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 2556 if (!ctxp->wqeq) { 2557 ctxp->wqeq = ctxp->ctxbuf->iocbq; 2558 ctxp->wqeq->hba_wqidx = 0; 2559 } 2560 2561 if (ctxp->state == LPFC_NVMET_STE_FREE) { 2562 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 2563 "6417 NVMET ABORT ctx freed %d %d oxid x%x\n", 2564 ctxp->state, ctxp->entry_cnt, ctxp->oxid); 2565 rc = WQE_BUSY; 2566 goto aerr; 2567 } 2568 ctxp->state = LPFC_NVMET_STE_ABORT; 2569 ctxp->entry_cnt++; 2570 rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri); 2571 if (rc == 0) 2572 goto aerr; 2573 2574 spin_lock_irqsave(&phba->hbalock, flags); 2575 abts_wqeq = ctxp->wqeq; 2576 abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp; 2577 abts_wqeq->iocb_cmpl = NULL; 2578 abts_wqeq->iocb_flag |= LPFC_IO_NVMET; 2579 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq); 2580 spin_unlock_irqrestore(&phba->hbalock, flags); 2581 if (rc == WQE_SUCCESS) { 2582 return 0; 2583 } 2584 2585 aerr: 2586 ctxp->flag &= ~LPFC_NVMET_ABORT_OP; 2587 atomic_inc(&tgtp->xmt_abort_rsp_error); 2588 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 2589 "6135 Failed to Issue ABTS for oxid x%x. 
Status x%x\n", 2590 ctxp->oxid, rc); 2591 return 1; 2592 } 2593 2594 static int 2595 lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba, 2596 struct lpfc_nvmet_rcv_ctx *ctxp, 2597 uint32_t sid, uint16_t xri) 2598 { 2599 struct lpfc_nvmet_tgtport *tgtp; 2600 struct lpfc_iocbq *abts_wqeq; 2601 union lpfc_wqe *wqe_abts; 2602 unsigned long flags; 2603 int rc; 2604 2605 if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) || 2606 (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) { 2607 ctxp->state = LPFC_NVMET_STE_LS_ABORT; 2608 ctxp->entry_cnt++; 2609 } else { 2610 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 2611 "6418 NVMET LS abort state mismatch " 2612 "IO x%x: %d %d\n", 2613 ctxp->oxid, ctxp->state, ctxp->entry_cnt); 2614 ctxp->state = LPFC_NVMET_STE_LS_ABORT; 2615 } 2616 2617 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 2618 if (!ctxp->wqeq) { 2619 /* Issue ABTS for this WQE based on iotag */ 2620 ctxp->wqeq = lpfc_sli_get_iocbq(phba); 2621 if (!ctxp->wqeq) { 2622 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 2623 "6068 Abort failed: No wqeqs: " 2624 "xri: x%x\n", xri); 2625 /* No failure to an ABTS request. */ 2626 kfree(ctxp); 2627 return 0; 2628 } 2629 } 2630 abts_wqeq = ctxp->wqeq; 2631 wqe_abts = &abts_wqeq->wqe; 2632 2633 if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) { 2634 rc = WQE_BUSY; 2635 goto out; 2636 } 2637 2638 spin_lock_irqsave(&phba->hbalock, flags); 2639 abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp; 2640 abts_wqeq->iocb_cmpl = 0; 2641 abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS; 2642 rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq); 2643 spin_unlock_irqrestore(&phba->hbalock, flags); 2644 if (rc == WQE_SUCCESS) { 2645 atomic_inc(&tgtp->xmt_abort_unsol); 2646 return 0; 2647 } 2648 out: 2649 atomic_inc(&tgtp->xmt_abort_rsp_error); 2650 abts_wqeq->context2 = NULL; 2651 abts_wqeq->context3 = NULL; 2652 lpfc_sli_release_iocbq(phba, abts_wqeq); 2653 kfree(ctxp); 2654 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 2655 "6056 Failed to Issue ABTS. Status x%x\n", rc); 2656 return 0; 2657 } 2658
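
/*
 * Illustrative sketch only, not part of the driver: the prep and abort
 * paths above all validate the remote node the same way before building
 * a WQE. A hypothetical helper, assuming the existing lpfc_findnode_did()
 * and NLP_CHK_NODE_ACT() interfaces, could read:
 *
 *	static struct lpfc_nodelist *
 *	lpfc_nvmet_get_active_ndlp(struct lpfc_hba *phba, uint32_t sid)
 *	{
 *		struct lpfc_nodelist *ndlp;
 *
 *		ndlp = lpfc_findnode_did(phba->pport, sid);
 *		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
 *		    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE &&
 *		     ndlp->nlp_state != NLP_STE_MAPPED_NODE))
 *			return NULL;
 *		return ndlp;
 *	}
 *
 * The helper name is hypothetical; each caller would still decide how to
 * log the failure and whether a missing node is treated as success (the
 * ABTS paths) or as a failure to build the WQE (the prep paths).
 */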