1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channsel Host Bus Adapters. * 4 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 ********************************************************************/ 23 #include <linux/pci.h> 24 #include <linux/slab.h> 25 #include <linux/interrupt.h> 26 #include <linux/delay.h> 27 #include <asm/unaligned.h> 28 #include <linux/crc-t10dif.h> 29 #include <net/checksum.h> 30 31 #include <scsi/scsi.h> 32 #include <scsi/scsi_device.h> 33 #include <scsi/scsi_eh.h> 34 #include <scsi/scsi_host.h> 35 #include <scsi/scsi_tcq.h> 36 #include <scsi/scsi_transport_fc.h> 37 #include <scsi/fc/fc_fs.h> 38 39 #include <../drivers/nvme/host/nvme.h> 40 #include <linux/nvme-fc-driver.h> 41 42 #include "lpfc_version.h" 43 #include "lpfc_hw4.h" 44 #include "lpfc_hw.h" 45 #include "lpfc_sli.h" 46 #include "lpfc_sli4.h" 47 #include "lpfc_nl.h" 48 #include "lpfc_disc.h" 49 #include "lpfc.h" 50 #include "lpfc_scsi.h" 51 #include "lpfc_nvme.h" 52 #include "lpfc_nvmet.h" 53 #include "lpfc_logmsg.h" 54 #include "lpfc_crtn.h" 55 #include "lpfc_vport.h" 56 #include "lpfc_debugfs.h" 57 58 static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *, 59 struct lpfc_nvmet_rcv_ctx *, 60 dma_addr_t rspbuf, 61 uint16_t rspsize); 62 static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *, 63 struct lpfc_nvmet_rcv_ctx *); 64 static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *, 65 struct lpfc_nvmet_rcv_ctx *, 66 uint32_t, uint16_t); 67 static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *, 68 struct lpfc_nvmet_rcv_ctx *, 69 uint32_t, uint16_t); 70 static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *, 71 struct lpfc_nvmet_rcv_ctx *, 72 uint32_t, uint16_t); 73 74 void 75 lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp) 76 { 77 unsigned long iflag; 78 79 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 80 "6313 NVMET Defer ctx release xri x%x flg x%x\n", 81 ctxp->oxid, ctxp->flag); 82 83 spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag); 84 if (ctxp->flag & LPFC_NVMET_CTX_RLS) { 85 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, 86 iflag); 87 return; 88 } 89 ctxp->flag |= LPFC_NVMET_CTX_RLS; 90 list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); 91 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag); 92 } 93 94 /** 95 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response 96 * @phba: Pointer to HBA context object. 97 * @cmdwqe: Pointer to driver command WQE object. 98 * @wcqe: Pointer to driver response CQE object. 99 * 100 * The function is called from SLI ring event handler with no 101 * lock held. This function is the completion handler for NVME LS commands 102 * The function frees memory resources used for the NVME commands. 103 **/ 104 static void 105 lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, 106 struct lpfc_wcqe_complete *wcqe) 107 { 108 struct lpfc_nvmet_tgtport *tgtp; 109 struct nvmefc_tgt_ls_req *rsp; 110 struct lpfc_nvmet_rcv_ctx *ctxp; 111 uint32_t status, result; 112 113 status = bf_get(lpfc_wcqe_c_status, wcqe); 114 result = wcqe->parameter; 115 ctxp = cmdwqe->context2; 116 117 if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) { 118 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 119 "6410 NVMET LS cmpl state mismatch IO x%x: " 120 "%d %d\n", 121 ctxp->oxid, ctxp->state, ctxp->entry_cnt); 122 } 123 124 if (!phba->targetport) 125 goto out; 126 127 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 128 129 if (status) 130 atomic_inc(&tgtp->xmt_ls_rsp_error); 131 else 132 atomic_inc(&tgtp->xmt_ls_rsp_cmpl); 133 134 out: 135 rsp = &ctxp->ctx.ls_req; 136 137 lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n", 138 ctxp->oxid, status, result); 139 140 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, 141 "6038 NVMET LS rsp cmpl: %d %d oxid x%x\n", 142 status, result, ctxp->oxid); 143 144 lpfc_nlp_put(cmdwqe->context1); 145 cmdwqe->context2 = NULL; 146 cmdwqe->context3 = NULL; 147 lpfc_sli_release_iocbq(phba, cmdwqe); 148 rsp->done(rsp); 149 kfree(ctxp); 150 } 151 152 /** 153 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context 154 * @phba: HBA buffer is associated with 155 * @ctxp: context to clean up 156 * @mp: Buffer to free 157 * 158 * Description: Frees the given DMA buffer in the appropriate way given by 159 * reposting it to its associated RQ so it can be reused. 160 * 161 * Notes: Takes phba->hbalock. Can be called with or without other locks held. 162 * 163 * Returns: None 164 **/ 165 void 166 lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) 167 { 168 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 169 struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context; 170 struct lpfc_nvmet_tgtport *tgtp; 171 struct fc_frame_header *fc_hdr; 172 struct rqb_dmabuf *nvmebuf; 173 uint32_t *payload; 174 uint32_t size, oxid, sid, rc; 175 unsigned long iflag; 176 177 if (ctxp->txrdy) { 178 pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy, 179 ctxp->txrdy_phys); 180 ctxp->txrdy = NULL; 181 ctxp->txrdy_phys = 0; 182 } 183 184 if (ctxp->state == LPFC_NVMET_STE_FREE) { 185 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 186 "6411 NVMET free, already free IO x%x: %d %d\n", 187 ctxp->oxid, ctxp->state, ctxp->entry_cnt); 188 } 189 ctxp->state = LPFC_NVMET_STE_FREE; 190 191 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag); 192 if (phba->sli4_hba.nvmet_io_wait_cnt) { 193 list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list, 194 nvmebuf, struct rqb_dmabuf, 195 hbuf.list); 196 phba->sli4_hba.nvmet_io_wait_cnt--; 197 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, 198 iflag); 199 200 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); 201 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 202 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 203 payload = (uint32_t *)(nvmebuf->dbuf.virt); 204 size = nvmebuf->bytes_recv; 205 sid = sli4_sid_from_fc_hdr(fc_hdr); 206 207 ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context; 208 ctxp->wqeq = NULL; 209 ctxp->txrdy = NULL; 210 ctxp->offset = 0; 211 ctxp->phba = phba; 212 ctxp->size = size; 213 ctxp->oxid = oxid; 214 ctxp->sid = sid; 215 ctxp->state = LPFC_NVMET_STE_RCV; 216 ctxp->entry_cnt = 1; 217 ctxp->flag = 0; 218 ctxp->ctxbuf = ctx_buf; 219 spin_lock_init(&ctxp->ctxlock); 220 221 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 222 if (phba->ktime_on) { 223 ctxp->ts_cmd_nvme = ktime_get_ns(); 224 ctxp->ts_isr_cmd = ctxp->ts_cmd_nvme; 225 ctxp->ts_nvme_data = 0; 226 ctxp->ts_data_wqput = 0; 227 ctxp->ts_isr_data = 0; 228 ctxp->ts_data_nvme = 0; 229 ctxp->ts_nvme_status = 0; 230 ctxp->ts_status_wqput = 0; 231 ctxp->ts_isr_status = 0; 232 ctxp->ts_status_nvme = 0; 233 } 234 #endif 235 atomic_inc(&tgtp->rcv_fcp_cmd_in); 236 /* 237 * The calling sequence should be: 238 * nvmet_fc_rcv_fcp_req->lpfc_nvmet_xmt_fcp_op/cmp- req->done 239 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp. 240 * When we return from nvmet_fc_rcv_fcp_req, all relevant info 241 * the NVME command / FC header is stored. 242 * A buffer has already been reposted for this IO, so just free 243 * the nvmebuf. 244 */ 245 rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req, 246 payload, size); 247 248 /* Process FCP command */ 249 if (rc == 0) { 250 atomic_inc(&tgtp->rcv_fcp_cmd_out); 251 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); 252 return; 253 } 254 255 atomic_inc(&tgtp->rcv_fcp_cmd_drop); 256 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 257 "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n", 258 ctxp->oxid, rc, 259 atomic_read(&tgtp->rcv_fcp_cmd_in), 260 atomic_read(&tgtp->rcv_fcp_cmd_out), 261 atomic_read(&tgtp->xmt_fcp_release)); 262 263 lpfc_nvmet_defer_release(phba, ctxp); 264 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid); 265 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); 266 return; 267 } 268 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag); 269 270 spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_put_lock, iflag); 271 list_add_tail(&ctx_buf->list, 272 &phba->sli4_hba.lpfc_nvmet_ctx_put_list); 273 phba->sli4_hba.nvmet_ctx_put_cnt++; 274 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_put_lock, iflag); 275 #endif 276 } 277 278 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 279 static void 280 lpfc_nvmet_ktime(struct lpfc_hba *phba, 281 struct lpfc_nvmet_rcv_ctx *ctxp) 282 { 283 uint64_t seg1, seg2, seg3, seg4, seg5; 284 uint64_t seg6, seg7, seg8, seg9, seg10; 285 286 if (!phba->ktime_on) 287 return; 288 289 if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme || 290 !ctxp->ts_nvme_data || !ctxp->ts_data_wqput || 291 !ctxp->ts_isr_data || !ctxp->ts_data_nvme || 292 !ctxp->ts_nvme_status || !ctxp->ts_status_wqput || 293 !ctxp->ts_isr_status || !ctxp->ts_status_nvme) 294 return; 295 296 if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme) 297 return; 298 if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data) 299 return; 300 if (ctxp->ts_nvme_data > ctxp->ts_data_wqput) 301 return; 302 if (ctxp->ts_data_wqput > ctxp->ts_isr_data) 303 return; 304 if (ctxp->ts_isr_data > ctxp->ts_data_nvme) 305 return; 306 if (ctxp->ts_data_nvme > ctxp->ts_nvme_status) 307 return; 308 if (ctxp->ts_nvme_status > ctxp->ts_status_wqput) 309 return; 310 if (ctxp->ts_status_wqput > ctxp->ts_isr_status) 311 return; 312 if (ctxp->ts_isr_status > ctxp->ts_status_nvme) 313 return; 314 /* 315 * Segment 1 - Time from FCP command received by MSI-X ISR 316 * to FCP command is passed to NVME Layer. 317 * Segment 2 - Time from FCP command payload handed 318 * off to NVME Layer to Driver receives a Command op 319 * from NVME Layer. 320 * Segment 3 - Time from Driver receives a Command op 321 * from NVME Layer to Command is put on WQ. 322 * Segment 4 - Time from Driver WQ put is done 323 * to MSI-X ISR for Command cmpl. 324 * Segment 5 - Time from MSI-X ISR for Command cmpl to 325 * Command cmpl is passed to NVME Layer. 326 * Segment 6 - Time from Command cmpl is passed to NVME 327 * Layer to Driver receives a RSP op from NVME Layer. 328 * Segment 7 - Time from Driver receives a RSP op from 329 * NVME Layer to WQ put is done on TRSP FCP Status. 330 * Segment 8 - Time from Driver WQ put is done on TRSP 331 * FCP Status to MSI-X ISR for TRSP cmpl. 332 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to 333 * TRSP cmpl is passed to NVME Layer. 334 * Segment 10 - Time from FCP command received by 335 * MSI-X ISR to command is completed on wire. 336 * (Segments 1 thru 8) for READDATA / WRITEDATA 337 * (Segments 1 thru 4) for READDATA_RSP 338 */ 339 seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd; 340 seg2 = (ctxp->ts_nvme_data - ctxp->ts_isr_cmd) - seg1; 341 seg3 = (ctxp->ts_data_wqput - ctxp->ts_isr_cmd) - 342 seg1 - seg2; 343 seg4 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd) - 344 seg1 - seg2 - seg3; 345 seg5 = (ctxp->ts_data_nvme - ctxp->ts_isr_cmd) - 346 seg1 - seg2 - seg3 - seg4; 347 348 /* For auto rsp commands seg6 thru seg10 will be 0 */ 349 if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) { 350 seg6 = (ctxp->ts_nvme_status - 351 ctxp->ts_isr_cmd) - 352 seg1 - seg2 - seg3 - seg4 - seg5; 353 seg7 = (ctxp->ts_status_wqput - 354 ctxp->ts_isr_cmd) - 355 seg1 - seg2 - seg3 - 356 seg4 - seg5 - seg6; 357 seg8 = (ctxp->ts_isr_status - 358 ctxp->ts_isr_cmd) - 359 seg1 - seg2 - seg3 - seg4 - 360 seg5 - seg6 - seg7; 361 seg9 = (ctxp->ts_status_nvme - 362 ctxp->ts_isr_cmd) - 363 seg1 - seg2 - seg3 - seg4 - 364 seg5 - seg6 - seg7 - seg8; 365 seg10 = (ctxp->ts_isr_status - 366 ctxp->ts_isr_cmd); 367 } else { 368 seg6 = 0; 369 seg7 = 0; 370 seg8 = 0; 371 seg9 = 0; 372 seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd); 373 } 374 375 phba->ktime_seg1_total += seg1; 376 if (seg1 < phba->ktime_seg1_min) 377 phba->ktime_seg1_min = seg1; 378 else if (seg1 > phba->ktime_seg1_max) 379 phba->ktime_seg1_max = seg1; 380 381 phba->ktime_seg2_total += seg2; 382 if (seg2 < phba->ktime_seg2_min) 383 phba->ktime_seg2_min = seg2; 384 else if (seg2 > phba->ktime_seg2_max) 385 phba->ktime_seg2_max = seg2; 386 387 phba->ktime_seg3_total += seg3; 388 if (seg3 < phba->ktime_seg3_min) 389 phba->ktime_seg3_min = seg3; 390 else if (seg3 > phba->ktime_seg3_max) 391 phba->ktime_seg3_max = seg3; 392 393 phba->ktime_seg4_total += seg4; 394 if (seg4 < phba->ktime_seg4_min) 395 phba->ktime_seg4_min = seg4; 396 else if (seg4 > phba->ktime_seg4_max) 397 phba->ktime_seg4_max = seg4; 398 399 phba->ktime_seg5_total += seg5; 400 if (seg5 < phba->ktime_seg5_min) 401 phba->ktime_seg5_min = seg5; 402 else if (seg5 > phba->ktime_seg5_max) 403 phba->ktime_seg5_max = seg5; 404 405 phba->ktime_data_samples++; 406 if (!seg6) 407 goto out; 408 409 phba->ktime_seg6_total += seg6; 410 if (seg6 < phba->ktime_seg6_min) 411 phba->ktime_seg6_min = seg6; 412 else if (seg6 > phba->ktime_seg6_max) 413 phba->ktime_seg6_max = seg6; 414 415 phba->ktime_seg7_total += seg7; 416 if (seg7 < phba->ktime_seg7_min) 417 phba->ktime_seg7_min = seg7; 418 else if (seg7 > phba->ktime_seg7_max) 419 phba->ktime_seg7_max = seg7; 420 421 phba->ktime_seg8_total += seg8; 422 if (seg8 < phba->ktime_seg8_min) 423 phba->ktime_seg8_min = seg8; 424 else if (seg8 > phba->ktime_seg8_max) 425 phba->ktime_seg8_max = seg8; 426 427 phba->ktime_seg9_total += seg9; 428 if (seg9 < phba->ktime_seg9_min) 429 phba->ktime_seg9_min = seg9; 430 else if (seg9 > phba->ktime_seg9_max) 431 phba->ktime_seg9_max = seg9; 432 out: 433 phba->ktime_seg10_total += seg10; 434 if (seg10 < phba->ktime_seg10_min) 435 phba->ktime_seg10_min = seg10; 436 else if (seg10 > phba->ktime_seg10_max) 437 phba->ktime_seg10_max = seg10; 438 phba->ktime_status_samples++; 439 } 440 #endif 441 442 /** 443 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response 444 * @phba: Pointer to HBA context object. 445 * @cmdwqe: Pointer to driver command WQE object. 446 * @wcqe: Pointer to driver response CQE object. 447 * 448 * The function is called from SLI ring event handler with no 449 * lock held. This function is the completion handler for NVME FCP commands 450 * The function frees memory resources used for the NVME commands. 451 **/ 452 static void 453 lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, 454 struct lpfc_wcqe_complete *wcqe) 455 { 456 struct lpfc_nvmet_tgtport *tgtp; 457 struct nvmefc_tgt_fcp_req *rsp; 458 struct lpfc_nvmet_rcv_ctx *ctxp; 459 uint32_t status, result, op, start_clean; 460 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 461 uint32_t id; 462 #endif 463 464 ctxp = cmdwqe->context2; 465 ctxp->flag &= ~LPFC_NVMET_IO_INP; 466 467 rsp = &ctxp->ctx.fcp_req; 468 op = rsp->op; 469 470 status = bf_get(lpfc_wcqe_c_status, wcqe); 471 result = wcqe->parameter; 472 473 if (phba->targetport) 474 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 475 else 476 tgtp = NULL; 477 478 lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n", 479 ctxp->oxid, op, status); 480 481 if (status) { 482 rsp->fcp_error = NVME_SC_DATA_XFER_ERROR; 483 rsp->transferred_length = 0; 484 if (tgtp) 485 atomic_inc(&tgtp->xmt_fcp_rsp_error); 486 487 /* pick up SLI4 exhange busy condition */ 488 if (bf_get(lpfc_wcqe_c_xb, wcqe)) { 489 ctxp->flag |= LPFC_NVMET_XBUSY; 490 491 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 492 "6315 IO Cmpl XBUSY: xri x%x: %x/%x\n", 493 ctxp->oxid, status, result); 494 } else { 495 ctxp->flag &= ~LPFC_NVMET_XBUSY; 496 } 497 498 } else { 499 rsp->fcp_error = NVME_SC_SUCCESS; 500 if (op == NVMET_FCOP_RSP) 501 rsp->transferred_length = rsp->rsplen; 502 else 503 rsp->transferred_length = rsp->transfer_length; 504 if (tgtp) 505 atomic_inc(&tgtp->xmt_fcp_rsp_cmpl); 506 } 507 508 if ((op == NVMET_FCOP_READDATA_RSP) || 509 (op == NVMET_FCOP_RSP)) { 510 /* Sanity check */ 511 ctxp->state = LPFC_NVMET_STE_DONE; 512 ctxp->entry_cnt++; 513 514 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 515 if (phba->ktime_on) { 516 if (rsp->op == NVMET_FCOP_READDATA_RSP) { 517 ctxp->ts_isr_data = 518 cmdwqe->isr_timestamp; 519 ctxp->ts_data_nvme = 520 ktime_get_ns(); 521 ctxp->ts_nvme_status = 522 ctxp->ts_data_nvme; 523 ctxp->ts_status_wqput = 524 ctxp->ts_data_nvme; 525 ctxp->ts_isr_status = 526 ctxp->ts_data_nvme; 527 ctxp->ts_status_nvme = 528 ctxp->ts_data_nvme; 529 } else { 530 ctxp->ts_isr_status = 531 cmdwqe->isr_timestamp; 532 ctxp->ts_status_nvme = 533 ktime_get_ns(); 534 } 535 } 536 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { 537 id = smp_processor_id(); 538 if (ctxp->cpu != id) 539 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 540 "6703 CPU Check cmpl: " 541 "cpu %d expect %d\n", 542 id, ctxp->cpu); 543 if (ctxp->cpu < LPFC_CHECK_CPU_CNT) 544 phba->cpucheck_cmpl_io[id]++; 545 } 546 #endif 547 rsp->done(rsp); 548 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 549 if (phba->ktime_on) 550 lpfc_nvmet_ktime(phba, ctxp); 551 #endif 552 /* lpfc_nvmet_xmt_fcp_release() will recycle the context */ 553 } else { 554 ctxp->entry_cnt++; 555 start_clean = offsetof(struct lpfc_iocbq, wqe); 556 memset(((char *)cmdwqe) + start_clean, 0, 557 (sizeof(struct lpfc_iocbq) - start_clean)); 558 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 559 if (phba->ktime_on) { 560 ctxp->ts_isr_data = cmdwqe->isr_timestamp; 561 ctxp->ts_data_nvme = ktime_get_ns(); 562 } 563 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { 564 id = smp_processor_id(); 565 if (ctxp->cpu != id) 566 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 567 "6704 CPU Check cmdcmpl: " 568 "cpu %d expect %d\n", 569 id, ctxp->cpu); 570 if (ctxp->cpu < LPFC_CHECK_CPU_CNT) 571 phba->cpucheck_ccmpl_io[id]++; 572 } 573 #endif 574 rsp->done(rsp); 575 } 576 } 577 578 static int 579 lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport, 580 struct nvmefc_tgt_ls_req *rsp) 581 { 582 struct lpfc_nvmet_rcv_ctx *ctxp = 583 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req); 584 struct lpfc_hba *phba = ctxp->phba; 585 struct hbq_dmabuf *nvmebuf = 586 (struct hbq_dmabuf *)ctxp->rqb_buffer; 587 struct lpfc_iocbq *nvmewqeq; 588 struct lpfc_nvmet_tgtport *nvmep = tgtport->private; 589 struct lpfc_dmabuf dmabuf; 590 struct ulp_bde64 bpl; 591 int rc; 592 593 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, 594 "6023 NVMET LS rsp oxid x%x\n", ctxp->oxid); 595 596 if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) || 597 (ctxp->entry_cnt != 1)) { 598 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 599 "6412 NVMET LS rsp state mismatch " 600 "oxid x%x: %d %d\n", 601 ctxp->oxid, ctxp->state, ctxp->entry_cnt); 602 } 603 ctxp->state = LPFC_NVMET_STE_LS_RSP; 604 ctxp->entry_cnt++; 605 606 nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma, 607 rsp->rsplen); 608 if (nvmewqeq == NULL) { 609 atomic_inc(&nvmep->xmt_ls_drop); 610 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 611 "6150 LS Drop IO x%x: Prep\n", 612 ctxp->oxid); 613 lpfc_in_buf_free(phba, &nvmebuf->dbuf); 614 atomic_inc(&nvmep->xmt_ls_abort); 615 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, 616 ctxp->sid, ctxp->oxid); 617 return -ENOMEM; 618 } 619 620 /* Save numBdes for bpl2sgl */ 621 nvmewqeq->rsvd2 = 1; 622 nvmewqeq->hba_wqidx = 0; 623 nvmewqeq->context3 = &dmabuf; 624 dmabuf.virt = &bpl; 625 bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow; 626 bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh; 627 bpl.tus.f.bdeSize = rsp->rsplen; 628 bpl.tus.f.bdeFlags = 0; 629 bpl.tus.w = le32_to_cpu(bpl.tus.w); 630 631 nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp; 632 nvmewqeq->iocb_cmpl = NULL; 633 nvmewqeq->context2 = ctxp; 634 635 lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n", 636 ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen); 637 638 rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq); 639 if (rc == WQE_SUCCESS) { 640 /* 641 * Okay to repost buffer here, but wait till cmpl 642 * before freeing ctxp and iocbq. 643 */ 644 lpfc_in_buf_free(phba, &nvmebuf->dbuf); 645 ctxp->rqb_buffer = 0; 646 atomic_inc(&nvmep->xmt_ls_rsp); 647 return 0; 648 } 649 /* Give back resources */ 650 atomic_inc(&nvmep->xmt_ls_drop); 651 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 652 "6151 LS Drop IO x%x: Issue %d\n", 653 ctxp->oxid, rc); 654 655 lpfc_nlp_put(nvmewqeq->context1); 656 657 lpfc_in_buf_free(phba, &nvmebuf->dbuf); 658 atomic_inc(&nvmep->xmt_ls_abort); 659 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); 660 return -ENXIO; 661 } 662 663 static int 664 lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, 665 struct nvmefc_tgt_fcp_req *rsp) 666 { 667 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private; 668 struct lpfc_nvmet_rcv_ctx *ctxp = 669 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); 670 struct lpfc_hba *phba = ctxp->phba; 671 struct lpfc_iocbq *nvmewqeq; 672 int rc; 673 674 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 675 if (phba->ktime_on) { 676 if (rsp->op == NVMET_FCOP_RSP) 677 ctxp->ts_nvme_status = ktime_get_ns(); 678 else 679 ctxp->ts_nvme_data = ktime_get_ns(); 680 } 681 if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { 682 int id = smp_processor_id(); 683 ctxp->cpu = id; 684 if (id < LPFC_CHECK_CPU_CNT) 685 phba->cpucheck_xmt_io[id]++; 686 if (rsp->hwqid != id) { 687 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 688 "6705 CPU Check OP: " 689 "cpu %d expect %d\n", 690 id, rsp->hwqid); 691 ctxp->cpu = rsp->hwqid; 692 } 693 } 694 #endif 695 696 /* Sanity check */ 697 if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) || 698 (ctxp->state == LPFC_NVMET_STE_ABORT)) { 699 atomic_inc(&lpfc_nvmep->xmt_fcp_drop); 700 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 701 "6102 IO xri x%x aborted\n", 702 ctxp->oxid); 703 rc = -ENXIO; 704 goto aerr; 705 } 706 707 nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp); 708 if (nvmewqeq == NULL) { 709 atomic_inc(&lpfc_nvmep->xmt_fcp_drop); 710 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 711 "6152 FCP Drop IO x%x: Prep\n", 712 ctxp->oxid); 713 rc = -ENXIO; 714 goto aerr; 715 } 716 717 nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp; 718 nvmewqeq->iocb_cmpl = NULL; 719 nvmewqeq->context2 = ctxp; 720 nvmewqeq->iocb_flag |= LPFC_IO_NVMET; 721 ctxp->wqeq->hba_wqidx = rsp->hwqid; 722 723 lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n", 724 ctxp->oxid, rsp->op, rsp->rsplen); 725 726 ctxp->flag |= LPFC_NVMET_IO_INP; 727 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq); 728 if (rc == WQE_SUCCESS) { 729 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 730 if (!phba->ktime_on) 731 return 0; 732 if (rsp->op == NVMET_FCOP_RSP) 733 ctxp->ts_status_wqput = ktime_get_ns(); 734 else 735 ctxp->ts_data_wqput = ktime_get_ns(); 736 #endif 737 return 0; 738 } 739 740 /* Give back resources */ 741 atomic_inc(&lpfc_nvmep->xmt_fcp_drop); 742 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 743 "6153 FCP Drop IO x%x: Issue: %d\n", 744 ctxp->oxid, rc); 745 746 ctxp->wqeq->hba_wqidx = 0; 747 nvmewqeq->context2 = NULL; 748 nvmewqeq->context3 = NULL; 749 rc = -EBUSY; 750 aerr: 751 return rc; 752 } 753 754 static void 755 lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport) 756 { 757 struct lpfc_nvmet_tgtport *tport = targetport->private; 758 759 /* release any threads waiting for the unreg to complete */ 760 complete(&tport->tport_unreg_done); 761 } 762 763 static void 764 lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport, 765 struct nvmefc_tgt_fcp_req *req) 766 { 767 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private; 768 struct lpfc_nvmet_rcv_ctx *ctxp = 769 container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); 770 struct lpfc_hba *phba = ctxp->phba; 771 unsigned long flags; 772 773 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 774 "6103 NVMET Abort op: oxri x%x flg x%x ste %d\n", 775 ctxp->oxid, ctxp->flag, ctxp->state); 776 777 lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n", 778 ctxp->oxid, ctxp->flag, ctxp->state); 779 780 atomic_inc(&lpfc_nvmep->xmt_fcp_abort); 781 782 spin_lock_irqsave(&ctxp->ctxlock, flags); 783 784 /* Since iaab/iaar are NOT set, we need to check 785 * if the firmware is in process of aborting IO 786 */ 787 if (ctxp->flag & LPFC_NVMET_XBUSY) { 788 spin_unlock_irqrestore(&ctxp->ctxlock, flags); 789 return; 790 } 791 ctxp->flag |= LPFC_NVMET_ABORT_OP; 792 793 /* An state of LPFC_NVMET_STE_RCV means we have just received 794 * the NVME command and have not started processing it. 795 * (by issuing any IO WQEs on this exchange yet) 796 */ 797 if (ctxp->state == LPFC_NVMET_STE_RCV) 798 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, 799 ctxp->oxid); 800 else 801 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid, 802 ctxp->oxid); 803 spin_unlock_irqrestore(&ctxp->ctxlock, flags); 804 } 805 806 static void 807 lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport, 808 struct nvmefc_tgt_fcp_req *rsp) 809 { 810 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private; 811 struct lpfc_nvmet_rcv_ctx *ctxp = 812 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); 813 struct lpfc_hba *phba = ctxp->phba; 814 unsigned long flags; 815 bool aborting = false; 816 817 if (ctxp->state != LPFC_NVMET_STE_DONE && 818 ctxp->state != LPFC_NVMET_STE_ABORT) { 819 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 820 "6413 NVMET release bad state %d %d oxid x%x\n", 821 ctxp->state, ctxp->entry_cnt, ctxp->oxid); 822 } 823 824 spin_lock_irqsave(&ctxp->ctxlock, flags); 825 if ((ctxp->flag & LPFC_NVMET_ABORT_OP) || 826 (ctxp->flag & LPFC_NVMET_XBUSY)) { 827 aborting = true; 828 /* let the abort path do the real release */ 829 lpfc_nvmet_defer_release(phba, ctxp); 830 } 831 spin_unlock_irqrestore(&ctxp->ctxlock, flags); 832 833 lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid, 834 ctxp->state, aborting); 835 836 atomic_inc(&lpfc_nvmep->xmt_fcp_release); 837 838 if (aborting) 839 return; 840 841 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); 842 } 843 844 static void 845 lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport, 846 struct nvmefc_tgt_fcp_req *rsp) 847 { 848 struct lpfc_nvmet_tgtport *tgtp; 849 struct lpfc_nvmet_rcv_ctx *ctxp = 850 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); 851 struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer; 852 struct lpfc_hba *phba = ctxp->phba; 853 854 lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n", 855 ctxp->oxid, ctxp->size, smp_processor_id()); 856 857 tgtp = phba->targetport->private; 858 atomic_inc(&tgtp->rcv_fcp_cmd_defer); 859 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ 860 } 861 862 static struct nvmet_fc_target_template lpfc_tgttemplate = { 863 .targetport_delete = lpfc_nvmet_targetport_delete, 864 .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp, 865 .fcp_op = lpfc_nvmet_xmt_fcp_op, 866 .fcp_abort = lpfc_nvmet_xmt_fcp_abort, 867 .fcp_req_release = lpfc_nvmet_xmt_fcp_release, 868 .defer_rcv = lpfc_nvmet_defer_rcv, 869 870 .max_hw_queues = 1, 871 .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS, 872 .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS, 873 .dma_boundary = 0xFFFFFFFF, 874 875 /* optional features */ 876 .target_features = 0, 877 /* sizes of additional private data for data structures */ 878 .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport), 879 }; 880 881 static void 882 lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba) 883 { 884 struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf; 885 unsigned long flags; 886 887 spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, flags); 888 spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock); 889 list_for_each_entry_safe(ctx_buf, next_ctx_buf, 890 &phba->sli4_hba.lpfc_nvmet_ctx_get_list, list) { 891 spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); 892 list_del_init(&ctx_buf->list); 893 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); 894 __lpfc_clear_active_sglq(phba, 895 ctx_buf->sglq->sli4_lxritag); 896 ctx_buf->sglq->state = SGL_FREED; 897 ctx_buf->sglq->ndlp = NULL; 898 899 spin_lock(&phba->sli4_hba.sgl_list_lock); 900 list_add_tail(&ctx_buf->sglq->list, 901 &phba->sli4_hba.lpfc_nvmet_sgl_list); 902 spin_unlock(&phba->sli4_hba.sgl_list_lock); 903 904 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq); 905 kfree(ctx_buf->context); 906 } 907 list_for_each_entry_safe(ctx_buf, next_ctx_buf, 908 &phba->sli4_hba.lpfc_nvmet_ctx_put_list, list) { 909 spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); 910 list_del_init(&ctx_buf->list); 911 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); 912 __lpfc_clear_active_sglq(phba, 913 ctx_buf->sglq->sli4_lxritag); 914 ctx_buf->sglq->state = SGL_FREED; 915 ctx_buf->sglq->ndlp = NULL; 916 917 spin_lock(&phba->sli4_hba.sgl_list_lock); 918 list_add_tail(&ctx_buf->sglq->list, 919 &phba->sli4_hba.lpfc_nvmet_sgl_list); 920 spin_unlock(&phba->sli4_hba.sgl_list_lock); 921 922 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq); 923 kfree(ctx_buf->context); 924 } 925 spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock); 926 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, flags); 927 } 928 929 static int 930 lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) 931 { 932 struct lpfc_nvmet_ctxbuf *ctx_buf; 933 struct lpfc_iocbq *nvmewqe; 934 union lpfc_wqe128 *wqe; 935 int i; 936 937 lpfc_printf_log(phba, KERN_INFO, LOG_NVME, 938 "6403 Allocate NVMET resources for %d XRIs\n", 939 phba->sli4_hba.nvmet_xri_cnt); 940 941 /* For all nvmet xris, allocate resources needed to process a 942 * received command on a per xri basis. 943 */ 944 for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) { 945 ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL); 946 if (!ctx_buf) { 947 lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 948 "6404 Ran out of memory for NVMET\n"); 949 return -ENOMEM; 950 } 951 952 ctx_buf->context = kzalloc(sizeof(*ctx_buf->context), 953 GFP_KERNEL); 954 if (!ctx_buf->context) { 955 kfree(ctx_buf); 956 lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 957 "6405 Ran out of NVMET " 958 "context memory\n"); 959 return -ENOMEM; 960 } 961 ctx_buf->context->ctxbuf = ctx_buf; 962 ctx_buf->context->state = LPFC_NVMET_STE_FREE; 963 964 ctx_buf->iocbq = lpfc_sli_get_iocbq(phba); 965 if (!ctx_buf->iocbq) { 966 kfree(ctx_buf->context); 967 kfree(ctx_buf); 968 lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 969 "6406 Ran out of NVMET iocb/WQEs\n"); 970 return -ENOMEM; 971 } 972 ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET; 973 nvmewqe = ctx_buf->iocbq; 974 wqe = (union lpfc_wqe128 *)&nvmewqe->wqe; 975 /* Initialize WQE */ 976 memset(wqe, 0, sizeof(union lpfc_wqe)); 977 /* Word 7 */ 978 bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI); 979 bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3); 980 bf_set(wqe_pu, &wqe->generic.wqe_com, 1); 981 /* Word 10 */ 982 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1); 983 bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0); 984 bf_set(wqe_qosd, &wqe->generic.wqe_com, 0); 985 986 ctx_buf->iocbq->context1 = NULL; 987 spin_lock(&phba->sli4_hba.sgl_list_lock); 988 ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq); 989 spin_unlock(&phba->sli4_hba.sgl_list_lock); 990 if (!ctx_buf->sglq) { 991 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq); 992 kfree(ctx_buf->context); 993 kfree(ctx_buf); 994 lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 995 "6407 Ran out of NVMET XRIs\n"); 996 return -ENOMEM; 997 } 998 spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock); 999 list_add_tail(&ctx_buf->list, 1000 &phba->sli4_hba.lpfc_nvmet_ctx_get_list); 1001 spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock); 1002 } 1003 phba->sli4_hba.nvmet_ctx_get_cnt = phba->sli4_hba.nvmet_xri_cnt; 1004 return 0; 1005 } 1006 1007 int 1008 lpfc_nvmet_create_targetport(struct lpfc_hba *phba) 1009 { 1010 struct lpfc_vport *vport = phba->pport; 1011 struct lpfc_nvmet_tgtport *tgtp; 1012 struct nvmet_fc_port_info pinfo; 1013 int error; 1014 1015 if (phba->targetport) 1016 return 0; 1017 1018 error = lpfc_nvmet_setup_io_context(phba); 1019 if (error) 1020 return error; 1021 1022 memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info)); 1023 pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn); 1024 pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn); 1025 pinfo.port_id = vport->fc_myDID; 1026 1027 /* Limit to LPFC_MAX_NVME_SEG_CNT. 1028 * For now need + 1 to get around NVME transport logic. 1029 */ 1030 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) { 1031 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, 1032 "6400 Reducing sg segment cnt to %d\n", 1033 LPFC_MAX_NVME_SEG_CNT); 1034 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT; 1035 } else { 1036 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; 1037 } 1038 lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; 1039 lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel; 1040 lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP | 1041 NVMET_FCTGTFEAT_CMD_IN_ISR | 1042 NVMET_FCTGTFEAT_OPDONE_IN_ISR; 1043 1044 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 1045 error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate, 1046 &phba->pcidev->dev, 1047 &phba->targetport); 1048 #else 1049 error = -ENOENT; 1050 #endif 1051 if (error) { 1052 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, 1053 "6025 Cannot register NVME targetport " 1054 "x%x\n", error); 1055 phba->targetport = NULL; 1056 1057 lpfc_nvmet_cleanup_io_context(phba); 1058 1059 } else { 1060 tgtp = (struct lpfc_nvmet_tgtport *) 1061 phba->targetport->private; 1062 tgtp->phba = phba; 1063 1064 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, 1065 "6026 Registered NVME " 1066 "targetport: %p, private %p " 1067 "portnm %llx nodenm %llx\n", 1068 phba->targetport, tgtp, 1069 pinfo.port_name, pinfo.node_name); 1070 1071 atomic_set(&tgtp->rcv_ls_req_in, 0); 1072 atomic_set(&tgtp->rcv_ls_req_out, 0); 1073 atomic_set(&tgtp->rcv_ls_req_drop, 0); 1074 atomic_set(&tgtp->xmt_ls_abort, 0); 1075 atomic_set(&tgtp->xmt_ls_abort_cmpl, 0); 1076 atomic_set(&tgtp->xmt_ls_rsp, 0); 1077 atomic_set(&tgtp->xmt_ls_drop, 0); 1078 atomic_set(&tgtp->xmt_ls_rsp_error, 0); 1079 atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0); 1080 atomic_set(&tgtp->rcv_fcp_cmd_in, 0); 1081 atomic_set(&tgtp->rcv_fcp_cmd_out, 0); 1082 atomic_set(&tgtp->rcv_fcp_cmd_drop, 0); 1083 atomic_set(&tgtp->xmt_fcp_drop, 0); 1084 atomic_set(&tgtp->xmt_fcp_read_rsp, 0); 1085 atomic_set(&tgtp->xmt_fcp_read, 0); 1086 atomic_set(&tgtp->xmt_fcp_write, 0); 1087 atomic_set(&tgtp->xmt_fcp_rsp, 0); 1088 atomic_set(&tgtp->xmt_fcp_release, 0); 1089 atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0); 1090 atomic_set(&tgtp->xmt_fcp_rsp_error, 0); 1091 atomic_set(&tgtp->xmt_fcp_rsp_drop, 0); 1092 atomic_set(&tgtp->xmt_fcp_abort, 0); 1093 atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0); 1094 atomic_set(&tgtp->xmt_abort_unsol, 0); 1095 atomic_set(&tgtp->xmt_abort_sol, 0); 1096 atomic_set(&tgtp->xmt_abort_rsp, 0); 1097 atomic_set(&tgtp->xmt_abort_rsp_error, 0); 1098 } 1099 return error; 1100 } 1101 1102 int 1103 lpfc_nvmet_update_targetport(struct lpfc_hba *phba) 1104 { 1105 struct lpfc_vport *vport = phba->pport; 1106 1107 if (!phba->targetport) 1108 return 0; 1109 1110 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, 1111 "6007 Update NVMET port %p did x%x\n", 1112 phba->targetport, vport->fc_myDID); 1113 1114 phba->targetport->port_id = vport->fc_myDID; 1115 return 0; 1116 } 1117 1118 /** 1119 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort 1120 * @phba: pointer to lpfc hba data structure. 1121 * @axri: pointer to the nvmet xri abort wcqe structure. 1122 * 1123 * This routine is invoked by the worker thread to process a SLI4 fast-path 1124 * NVMET aborted xri. 1125 **/ 1126 void 1127 lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, 1128 struct sli4_wcqe_xri_aborted *axri) 1129 { 1130 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 1131 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); 1132 struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp; 1133 struct lpfc_nodelist *ndlp; 1134 unsigned long iflag = 0; 1135 int rrq_empty = 0; 1136 bool released = false; 1137 1138 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 1139 "6317 XB aborted xri x%x rxid x%x\n", xri, rxid); 1140 1141 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) 1142 return; 1143 spin_lock_irqsave(&phba->hbalock, iflag); 1144 spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); 1145 list_for_each_entry_safe(ctxp, next_ctxp, 1146 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, 1147 list) { 1148 if (ctxp->ctxbuf->sglq->sli4_xritag != xri) 1149 continue; 1150 1151 /* Check if we already received a free context call 1152 * and we have completed processing an abort situation. 1153 */ 1154 if (ctxp->flag & LPFC_NVMET_CTX_RLS && 1155 !(ctxp->flag & LPFC_NVMET_ABORT_OP)) { 1156 list_del(&ctxp->list); 1157 released = true; 1158 } 1159 ctxp->flag &= ~LPFC_NVMET_XBUSY; 1160 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); 1161 1162 rrq_empty = list_empty(&phba->active_rrq_list); 1163 spin_unlock_irqrestore(&phba->hbalock, iflag); 1164 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); 1165 if (ndlp && NLP_CHK_NODE_ACT(ndlp) && 1166 (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE || 1167 ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { 1168 lpfc_set_rrq_active(phba, ndlp, 1169 ctxp->ctxbuf->sglq->sli4_lxritag, 1170 rxid, 1); 1171 lpfc_sli4_abts_err_handler(phba, ndlp, axri); 1172 } 1173 1174 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 1175 "6318 XB aborted oxid %x flg x%x (%x)\n", 1176 ctxp->oxid, ctxp->flag, released); 1177 if (released) 1178 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); 1179 1180 if (rrq_empty) 1181 lpfc_worker_wake_up(phba); 1182 return; 1183 } 1184 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); 1185 spin_unlock_irqrestore(&phba->hbalock, iflag); 1186 } 1187 1188 int 1189 lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, 1190 struct fc_frame_header *fc_hdr) 1191 1192 { 1193 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 1194 struct lpfc_hba *phba = vport->phba; 1195 struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp; 1196 struct nvmefc_tgt_fcp_req *rsp; 1197 uint16_t xri; 1198 unsigned long iflag = 0; 1199 1200 xri = be16_to_cpu(fc_hdr->fh_ox_id); 1201 1202 spin_lock_irqsave(&phba->hbalock, iflag); 1203 spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); 1204 list_for_each_entry_safe(ctxp, next_ctxp, 1205 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, 1206 list) { 1207 if (ctxp->ctxbuf->sglq->sli4_xritag != xri) 1208 continue; 1209 1210 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); 1211 spin_unlock_irqrestore(&phba->hbalock, iflag); 1212 1213 spin_lock_irqsave(&ctxp->ctxlock, iflag); 1214 ctxp->flag |= LPFC_NVMET_ABTS_RCV; 1215 spin_unlock_irqrestore(&ctxp->ctxlock, iflag); 1216 1217 lpfc_nvmeio_data(phba, 1218 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n", 1219 xri, smp_processor_id(), 0); 1220 1221 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 1222 "6319 NVMET Rcv ABTS:acc xri x%x\n", xri); 1223 1224 rsp = &ctxp->ctx.fcp_req; 1225 nvmet_fc_rcv_fcp_abort(phba->targetport, rsp); 1226 1227 /* Respond with BA_ACC accordingly */ 1228 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1); 1229 return 0; 1230 } 1231 spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); 1232 spin_unlock_irqrestore(&phba->hbalock, iflag); 1233 1234 lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n", 1235 xri, smp_processor_id(), 1); 1236 1237 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 1238 "6320 NVMET Rcv ABTS:rjt xri x%x\n", xri); 1239 1240 /* Respond with BA_RJT accordingly */ 1241 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0); 1242 #endif 1243 return 0; 1244 } 1245 1246 void 1247 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) 1248 { 1249 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 1250 struct lpfc_nvmet_tgtport *tgtp; 1251 1252 if (phba->nvmet_support == 0) 1253 return; 1254 if (phba->targetport) { 1255 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 1256 init_completion(&tgtp->tport_unreg_done); 1257 nvmet_fc_unregister_targetport(phba->targetport); 1258 wait_for_completion_timeout(&tgtp->tport_unreg_done, 5); 1259 lpfc_nvmet_cleanup_io_context(phba); 1260 } 1261 phba->targetport = NULL; 1262 #endif 1263 } 1264 1265 /** 1266 * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer 1267 * @phba: pointer to lpfc hba data structure. 1268 * @pring: pointer to a SLI ring. 1269 * @nvmebuf: pointer to lpfc nvme command HBQ data structure. 1270 * 1271 * This routine is used for processing the WQE associated with a unsolicited 1272 * event. It first determines whether there is an existing ndlp that matches 1273 * the DID from the unsolicited WQE. If not, it will create a new one with 1274 * the DID from the unsolicited WQE. The ELS command from the unsolicited 1275 * WQE is then used to invoke the proper routine and to set up proper state 1276 * of the discovery state machine. 1277 **/ 1278 static void 1279 lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1280 struct hbq_dmabuf *nvmebuf) 1281 { 1282 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 1283 struct lpfc_nvmet_tgtport *tgtp; 1284 struct fc_frame_header *fc_hdr; 1285 struct lpfc_nvmet_rcv_ctx *ctxp; 1286 uint32_t *payload; 1287 uint32_t size, oxid, sid, rc; 1288 1289 if (!nvmebuf || !phba->targetport) { 1290 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1291 "6154 LS Drop IO\n"); 1292 oxid = 0; 1293 size = 0; 1294 sid = 0; 1295 ctxp = NULL; 1296 goto dropit; 1297 } 1298 1299 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 1300 payload = (uint32_t *)(nvmebuf->dbuf.virt); 1301 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); 1302 size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl); 1303 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 1304 sid = sli4_sid_from_fc_hdr(fc_hdr); 1305 1306 ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC); 1307 if (ctxp == NULL) { 1308 atomic_inc(&tgtp->rcv_ls_req_drop); 1309 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1310 "6155 LS Drop IO x%x: Alloc\n", 1311 oxid); 1312 dropit: 1313 lpfc_nvmeio_data(phba, "NVMET LS DROP: " 1314 "xri x%x sz %d from %06x\n", 1315 oxid, size, sid); 1316 if (nvmebuf) 1317 lpfc_in_buf_free(phba, &nvmebuf->dbuf); 1318 return; 1319 } 1320 ctxp->phba = phba; 1321 ctxp->size = size; 1322 ctxp->oxid = oxid; 1323 ctxp->sid = sid; 1324 ctxp->wqeq = NULL; 1325 ctxp->state = LPFC_NVMET_STE_LS_RCV; 1326 ctxp->entry_cnt = 1; 1327 ctxp->rqb_buffer = (void *)nvmebuf; 1328 1329 lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n", 1330 oxid, size, sid); 1331 /* 1332 * The calling sequence should be: 1333 * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp ->_req->done 1334 * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp. 1335 */ 1336 atomic_inc(&tgtp->rcv_ls_req_in); 1337 rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req, 1338 payload, size); 1339 1340 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, 1341 "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x " 1342 "%08x %08x %08x\n", size, rc, 1343 *payload, *(payload+1), *(payload+2), 1344 *(payload+3), *(payload+4), *(payload+5)); 1345 1346 if (rc == 0) { 1347 atomic_inc(&tgtp->rcv_ls_req_out); 1348 return; 1349 } 1350 1351 lpfc_nvmeio_data(phba, "NVMET LS DROP: xri x%x sz %d from %06x\n", 1352 oxid, size, sid); 1353 1354 atomic_inc(&tgtp->rcv_ls_req_drop); 1355 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1356 "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n", 1357 ctxp->oxid, rc); 1358 1359 /* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */ 1360 if (nvmebuf) 1361 lpfc_in_buf_free(phba, &nvmebuf->dbuf); 1362 1363 atomic_inc(&tgtp->xmt_ls_abort); 1364 lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid); 1365 #endif 1366 } 1367 1368 /** 1369 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer 1370 * @phba: pointer to lpfc hba data structure. 1371 * @pring: pointer to a SLI ring. 1372 * @nvmebuf: pointer to lpfc nvme command HBQ data structure. 1373 * 1374 * This routine is used for processing the WQE associated with a unsolicited 1375 * event. It first determines whether there is an existing ndlp that matches 1376 * the DID from the unsolicited WQE. If not, it will create a new one with 1377 * the DID from the unsolicited WQE. The ELS command from the unsolicited 1378 * WQE is then used to invoke the proper routine and to set up proper state 1379 * of the discovery state machine. 1380 **/ 1381 static void 1382 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, 1383 struct lpfc_sli_ring *pring, 1384 struct rqb_dmabuf *nvmebuf, 1385 uint64_t isr_timestamp) 1386 { 1387 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 1388 struct lpfc_nvmet_rcv_ctx *ctxp; 1389 struct lpfc_nvmet_tgtport *tgtp; 1390 struct fc_frame_header *fc_hdr; 1391 struct lpfc_nvmet_ctxbuf *ctx_buf; 1392 uint32_t *payload; 1393 uint32_t size, oxid, sid, rc, qno; 1394 unsigned long iflag; 1395 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1396 uint32_t id; 1397 #endif 1398 1399 ctx_buf = NULL; 1400 if (!nvmebuf || !phba->targetport) { 1401 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1402 "6157 NVMET FCP Drop IO\n"); 1403 oxid = 0; 1404 size = 0; 1405 sid = 0; 1406 ctxp = NULL; 1407 goto dropit; 1408 } 1409 1410 spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, iflag); 1411 if (phba->sli4_hba.nvmet_ctx_get_cnt) { 1412 list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_get_list, 1413 ctx_buf, struct lpfc_nvmet_ctxbuf, list); 1414 phba->sli4_hba.nvmet_ctx_get_cnt--; 1415 } else { 1416 spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock); 1417 if (phba->sli4_hba.nvmet_ctx_put_cnt) { 1418 list_splice(&phba->sli4_hba.lpfc_nvmet_ctx_put_list, 1419 &phba->sli4_hba.lpfc_nvmet_ctx_get_list); 1420 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_put_list); 1421 phba->sli4_hba.nvmet_ctx_get_cnt = 1422 phba->sli4_hba.nvmet_ctx_put_cnt; 1423 phba->sli4_hba.nvmet_ctx_put_cnt = 0; 1424 spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock); 1425 1426 list_remove_head( 1427 &phba->sli4_hba.lpfc_nvmet_ctx_get_list, 1428 ctx_buf, struct lpfc_nvmet_ctxbuf, list); 1429 phba->sli4_hba.nvmet_ctx_get_cnt--; 1430 } else { 1431 spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock); 1432 } 1433 } 1434 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, iflag); 1435 1436 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); 1437 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 1438 size = nvmebuf->bytes_recv; 1439 1440 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1441 if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) { 1442 id = smp_processor_id(); 1443 if (id < LPFC_CHECK_CPU_CNT) 1444 phba->cpucheck_rcv_io[id]++; 1445 } 1446 #endif 1447 1448 lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n", 1449 oxid, size, smp_processor_id()); 1450 1451 if (!ctx_buf) { 1452 /* Queue this NVME IO to process later */ 1453 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag); 1454 list_add_tail(&nvmebuf->hbuf.list, 1455 &phba->sli4_hba.lpfc_nvmet_io_wait_list); 1456 phba->sli4_hba.nvmet_io_wait_cnt++; 1457 phba->sli4_hba.nvmet_io_wait_total++; 1458 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, 1459 iflag); 1460 1461 /* Post a brand new DMA buffer to RQ */ 1462 qno = nvmebuf->idx; 1463 lpfc_post_rq_buffer( 1464 phba, phba->sli4_hba.nvmet_mrq_hdr[qno], 1465 phba->sli4_hba.nvmet_mrq_data[qno], 1, qno); 1466 return; 1467 } 1468 1469 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 1470 payload = (uint32_t *)(nvmebuf->dbuf.virt); 1471 sid = sli4_sid_from_fc_hdr(fc_hdr); 1472 1473 ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context; 1474 if (ctxp->state != LPFC_NVMET_STE_FREE) { 1475 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1476 "6414 NVMET Context corrupt %d %d oxid x%x\n", 1477 ctxp->state, ctxp->entry_cnt, ctxp->oxid); 1478 } 1479 ctxp->wqeq = NULL; 1480 ctxp->txrdy = NULL; 1481 ctxp->offset = 0; 1482 ctxp->phba = phba; 1483 ctxp->size = size; 1484 ctxp->oxid = oxid; 1485 ctxp->sid = sid; 1486 ctxp->state = LPFC_NVMET_STE_RCV; 1487 ctxp->entry_cnt = 1; 1488 ctxp->flag = 0; 1489 ctxp->ctxbuf = ctx_buf; 1490 spin_lock_init(&ctxp->ctxlock); 1491 1492 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1493 if (phba->ktime_on) { 1494 ctxp->ts_isr_cmd = isr_timestamp; 1495 ctxp->ts_cmd_nvme = ktime_get_ns(); 1496 ctxp->ts_nvme_data = 0; 1497 ctxp->ts_data_wqput = 0; 1498 ctxp->ts_isr_data = 0; 1499 ctxp->ts_data_nvme = 0; 1500 ctxp->ts_nvme_status = 0; 1501 ctxp->ts_status_wqput = 0; 1502 ctxp->ts_isr_status = 0; 1503 ctxp->ts_status_nvme = 0; 1504 } 1505 #endif 1506 1507 atomic_inc(&tgtp->rcv_fcp_cmd_in); 1508 /* 1509 * The calling sequence should be: 1510 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done 1511 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp. 1512 * When we return from nvmet_fc_rcv_fcp_req, all relevant info in 1513 * the NVME command / FC header is stored, so we are free to repost 1514 * the buffer. 1515 */ 1516 rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req, 1517 payload, size); 1518 1519 /* Process FCP command */ 1520 if (rc == 0) { 1521 atomic_inc(&tgtp->rcv_fcp_cmd_out); 1522 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ 1523 return; 1524 } 1525 1526 /* Processing of FCP command is deferred */ 1527 if (rc == -EOVERFLOW) { 1528 lpfc_nvmeio_data(phba, 1529 "NVMET RCV BUSY: xri x%x sz %d from %06x\n", 1530 oxid, size, sid); 1531 /* defer reposting rcv buffer till .defer_rcv callback */ 1532 ctxp->rqb_buffer = nvmebuf; 1533 atomic_inc(&tgtp->rcv_fcp_cmd_out); 1534 return; 1535 } 1536 1537 atomic_inc(&tgtp->rcv_fcp_cmd_drop); 1538 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1539 "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n", 1540 ctxp->oxid, rc, 1541 atomic_read(&tgtp->rcv_fcp_cmd_in), 1542 atomic_read(&tgtp->rcv_fcp_cmd_out), 1543 atomic_read(&tgtp->xmt_fcp_release)); 1544 dropit: 1545 lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n", 1546 oxid, size, sid); 1547 if (oxid) { 1548 lpfc_nvmet_defer_release(phba, ctxp); 1549 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid); 1550 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ 1551 return; 1552 } 1553 1554 if (ctx_buf) 1555 lpfc_nvmet_ctxbuf_post(phba, ctx_buf); 1556 1557 if (nvmebuf) 1558 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ 1559 #endif 1560 } 1561 1562 /** 1563 * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport 1564 * @phba: pointer to lpfc hba data structure. 1565 * @pring: pointer to a SLI ring. 1566 * @nvmebuf: pointer to received nvme data structure. 1567 * 1568 * This routine is used to process an unsolicited event received from a SLI 1569 * (Service Level Interface) ring. The actual processing of the data buffer 1570 * associated with the unsolicited event is done by invoking the routine 1571 * lpfc_nvmet_unsol_ls_buffer() after properly set up the buffer from the 1572 * SLI RQ on which the unsolicited event was received. 1573 **/ 1574 void 1575 lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1576 struct lpfc_iocbq *piocb) 1577 { 1578 struct lpfc_dmabuf *d_buf; 1579 struct hbq_dmabuf *nvmebuf; 1580 1581 d_buf = piocb->context2; 1582 nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 1583 1584 if (phba->nvmet_support == 0) { 1585 lpfc_in_buf_free(phba, &nvmebuf->dbuf); 1586 return; 1587 } 1588 lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf); 1589 } 1590 1591 /** 1592 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport 1593 * @phba: pointer to lpfc hba data structure. 1594 * @pring: pointer to a SLI ring. 1595 * @nvmebuf: pointer to received nvme data structure. 1596 * 1597 * This routine is used to process an unsolicited event received from a SLI 1598 * (Service Level Interface) ring. The actual processing of the data buffer 1599 * associated with the unsolicited event is done by invoking the routine 1600 * lpfc_nvmet_unsol_fcp_buffer() after properly set up the buffer from the 1601 * SLI RQ on which the unsolicited event was received. 1602 **/ 1603 void 1604 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, 1605 struct lpfc_sli_ring *pring, 1606 struct rqb_dmabuf *nvmebuf, 1607 uint64_t isr_timestamp) 1608 { 1609 if (phba->nvmet_support == 0) { 1610 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); 1611 return; 1612 } 1613 lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf, 1614 isr_timestamp); 1615 } 1616 1617 /** 1618 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure 1619 * @phba: pointer to a host N_Port data structure. 1620 * @ctxp: Context info for NVME LS Request 1621 * @rspbuf: DMA buffer of NVME command. 1622 * @rspsize: size of the NVME command. 1623 * 1624 * This routine is used for allocating a lpfc-WQE data structure from 1625 * the driver lpfc-WQE free-list and prepare the WQE with the parameters 1626 * passed into the routine for discovery state machine to issue an Extended 1627 * Link Service (NVME) commands. It is a generic lpfc-WQE allocation 1628 * and preparation routine that is used by all the discovery state machine 1629 * routines and the NVME command-specific fields will be later set up by 1630 * the individual discovery machine routines after calling this routine 1631 * allocating and preparing a generic WQE data structure. It fills in the 1632 * Buffer Descriptor Entries (BDEs), allocates buffers for both command 1633 * payload and response payload (if expected). The reference count on the 1634 * ndlp is incremented by 1 and the reference to the ndlp is put into 1635 * context1 of the WQE data structure for this WQE to hold the ndlp 1636 * reference for the command's callback function to access later. 1637 * 1638 * Return code 1639 * Pointer to the newly allocated/prepared nvme wqe data structure 1640 * NULL - when nvme wqe data structure allocation/preparation failed 1641 **/ 1642 static struct lpfc_iocbq * 1643 lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba, 1644 struct lpfc_nvmet_rcv_ctx *ctxp, 1645 dma_addr_t rspbuf, uint16_t rspsize) 1646 { 1647 struct lpfc_nodelist *ndlp; 1648 struct lpfc_iocbq *nvmewqe; 1649 union lpfc_wqe *wqe; 1650 1651 if (!lpfc_is_link_up(phba)) { 1652 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, 1653 "6104 NVMET prep LS wqe: link err: " 1654 "NPORT x%x oxid:x%x ste %d\n", 1655 ctxp->sid, ctxp->oxid, ctxp->state); 1656 return NULL; 1657 } 1658 1659 /* Allocate buffer for command wqe */ 1660 nvmewqe = lpfc_sli_get_iocbq(phba); 1661 if (nvmewqe == NULL) { 1662 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, 1663 "6105 NVMET prep LS wqe: No WQE: " 1664 "NPORT x%x oxid x%x ste %d\n", 1665 ctxp->sid, ctxp->oxid, ctxp->state); 1666 return NULL; 1667 } 1668 1669 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); 1670 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || 1671 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 1672 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { 1673 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, 1674 "6106 NVMET prep LS wqe: No ndlp: " 1675 "NPORT x%x oxid x%x ste %d\n", 1676 ctxp->sid, ctxp->oxid, ctxp->state); 1677 goto nvme_wqe_free_wqeq_exit; 1678 } 1679 ctxp->wqeq = nvmewqe; 1680 1681 /* prevent preparing wqe with NULL ndlp reference */ 1682 nvmewqe->context1 = lpfc_nlp_get(ndlp); 1683 if (nvmewqe->context1 == NULL) 1684 goto nvme_wqe_free_wqeq_exit; 1685 nvmewqe->context2 = ctxp; 1686 1687 wqe = &nvmewqe->wqe; 1688 memset(wqe, 0, sizeof(union lpfc_wqe)); 1689 1690 /* Words 0 - 2 */ 1691 wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1692 wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize; 1693 wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf)); 1694 wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf)); 1695 1696 /* Word 3 */ 1697 1698 /* Word 4 */ 1699 1700 /* Word 5 */ 1701 bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0); 1702 bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1); 1703 bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0); 1704 bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP); 1705 bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME); 1706 1707 /* Word 6 */ 1708 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, 1709 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 1710 bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag); 1711 1712 /* Word 7 */ 1713 bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com, 1714 CMD_XMIT_SEQUENCE64_WQE); 1715 bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI); 1716 bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3); 1717 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); 1718 1719 /* Word 8 */ 1720 wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag; 1721 1722 /* Word 9 */ 1723 bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag); 1724 /* Needs to be set by caller */ 1725 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid); 1726 1727 /* Word 10 */ 1728 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); 1729 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE); 1730 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, 1731 LPFC_WQE_LENLOC_WORD12); 1732 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); 1733 1734 /* Word 11 */ 1735 bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com, 1736 LPFC_WQE_CQ_ID_DEFAULT); 1737 bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com, 1738 OTHER_COMMAND); 1739 1740 /* Word 12 */ 1741 wqe->xmit_sequence.xmit_len = rspsize; 1742 1743 nvmewqe->retry = 1; 1744 nvmewqe->vport = phba->pport; 1745 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT; 1746 nvmewqe->iocb_flag |= LPFC_IO_NVME_LS; 1747 1748 /* Xmit NVMET response to remote NPORT <did> */ 1749 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, 1750 "6039 Xmit NVMET LS response to remote " 1751 "NPORT x%x iotag:x%x oxid:x%x size:x%x\n", 1752 ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid, 1753 rspsize); 1754 return nvmewqe; 1755 1756 nvme_wqe_free_wqeq_exit: 1757 nvmewqe->context2 = NULL; 1758 nvmewqe->context3 = NULL; 1759 lpfc_sli_release_iocbq(phba, nvmewqe); 1760 return NULL; 1761 } 1762 1763 1764 static struct lpfc_iocbq * 1765 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, 1766 struct lpfc_nvmet_rcv_ctx *ctxp) 1767 { 1768 struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req; 1769 struct lpfc_nvmet_tgtport *tgtp; 1770 struct sli4_sge *sgl; 1771 struct lpfc_nodelist *ndlp; 1772 struct lpfc_iocbq *nvmewqe; 1773 struct scatterlist *sgel; 1774 union lpfc_wqe128 *wqe; 1775 uint32_t *txrdy; 1776 dma_addr_t physaddr; 1777 int i, cnt; 1778 int xc = 1; 1779 1780 if (!lpfc_is_link_up(phba)) { 1781 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1782 "6107 NVMET prep FCP wqe: link err:" 1783 "NPORT x%x oxid x%x ste %d\n", 1784 ctxp->sid, ctxp->oxid, ctxp->state); 1785 return NULL; 1786 } 1787 1788 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); 1789 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || 1790 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 1791 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { 1792 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1793 "6108 NVMET prep FCP wqe: no ndlp: " 1794 "NPORT x%x oxid x%x ste %d\n", 1795 ctxp->sid, ctxp->oxid, ctxp->state); 1796 return NULL; 1797 } 1798 1799 if (rsp->sg_cnt > phba->cfg_nvme_seg_cnt) { 1800 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1801 "6109 NVMET prep FCP wqe: seg cnt err: " 1802 "NPORT x%x oxid x%x ste %d cnt %d\n", 1803 ctxp->sid, ctxp->oxid, ctxp->state, 1804 phba->cfg_nvme_seg_cnt); 1805 return NULL; 1806 } 1807 1808 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 1809 nvmewqe = ctxp->wqeq; 1810 if (nvmewqe == NULL) { 1811 /* Allocate buffer for command wqe */ 1812 nvmewqe = ctxp->ctxbuf->iocbq; 1813 if (nvmewqe == NULL) { 1814 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1815 "6110 NVMET prep FCP wqe: No " 1816 "WQE: NPORT x%x oxid x%x ste %d\n", 1817 ctxp->sid, ctxp->oxid, ctxp->state); 1818 return NULL; 1819 } 1820 ctxp->wqeq = nvmewqe; 1821 xc = 0; /* create new XRI */ 1822 nvmewqe->sli4_lxritag = NO_XRI; 1823 nvmewqe->sli4_xritag = NO_XRI; 1824 } 1825 1826 /* Sanity check */ 1827 if (((ctxp->state == LPFC_NVMET_STE_RCV) && 1828 (ctxp->entry_cnt == 1)) || 1829 (ctxp->state == LPFC_NVMET_STE_DATA)) { 1830 wqe = (union lpfc_wqe128 *)&nvmewqe->wqe; 1831 } else { 1832 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1833 "6111 Wrong state NVMET FCP: %d cnt %d\n", 1834 ctxp->state, ctxp->entry_cnt); 1835 return NULL; 1836 } 1837 1838 sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl; 1839 switch (rsp->op) { 1840 case NVMET_FCOP_READDATA: 1841 case NVMET_FCOP_READDATA_RSP: 1842 /* Words 0 - 2 : The first sg segment */ 1843 sgel = &rsp->sg[0]; 1844 physaddr = sg_dma_address(sgel); 1845 wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1846 wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel); 1847 wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr)); 1848 wqe->fcp_tsend.bde.addrHigh = 1849 cpu_to_le32(putPaddrHigh(physaddr)); 1850 1851 /* Word 3 */ 1852 wqe->fcp_tsend.payload_offset_len = 0; 1853 1854 /* Word 4 */ 1855 wqe->fcp_tsend.relative_offset = ctxp->offset; 1856 1857 /* Word 5 */ 1858 1859 /* Word 6 */ 1860 bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com, 1861 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 1862 bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com, 1863 nvmewqe->sli4_xritag); 1864 1865 /* Word 7 */ 1866 bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE); 1867 1868 /* Word 8 */ 1869 wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag; 1870 1871 /* Word 9 */ 1872 bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag); 1873 bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid); 1874 1875 /* Word 10 */ 1876 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1); 1877 bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1); 1878 bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE); 1879 bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, 1880 LPFC_WQE_LENLOC_WORD12); 1881 bf_set(wqe_ebde_cnt, &wqe->fcp_tsend.wqe_com, 0); 1882 bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, xc); 1883 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1); 1884 if (phba->cfg_nvme_oas) 1885 bf_set(wqe_oas, &wqe->fcp_tsend.wqe_com, 1); 1886 1887 /* Word 11 */ 1888 bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, 1889 LPFC_WQE_CQ_ID_DEFAULT); 1890 bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, 1891 FCP_COMMAND_TSEND); 1892 1893 /* Word 12 */ 1894 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length; 1895 1896 /* Setup 2 SKIP SGEs */ 1897 sgl->addr_hi = 0; 1898 sgl->addr_lo = 0; 1899 sgl->word2 = 0; 1900 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); 1901 sgl->word2 = cpu_to_le32(sgl->word2); 1902 sgl->sge_len = 0; 1903 sgl++; 1904 sgl->addr_hi = 0; 1905 sgl->addr_lo = 0; 1906 sgl->word2 = 0; 1907 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); 1908 sgl->word2 = cpu_to_le32(sgl->word2); 1909 sgl->sge_len = 0; 1910 sgl++; 1911 if (rsp->op == NVMET_FCOP_READDATA_RSP) { 1912 atomic_inc(&tgtp->xmt_fcp_read_rsp); 1913 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1); 1914 if ((ndlp->nlp_flag & NLP_SUPPRESS_RSP) && 1915 (rsp->rsplen == 12)) { 1916 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 1); 1917 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0); 1918 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0); 1919 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0); 1920 } else { 1921 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0); 1922 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1); 1923 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1); 1924 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 1925 ((rsp->rsplen >> 2) - 1)); 1926 memcpy(&wqe->words[16], rsp->rspaddr, 1927 rsp->rsplen); 1928 } 1929 } else { 1930 atomic_inc(&tgtp->xmt_fcp_read); 1931 1932 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0); 1933 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0); 1934 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0); 1935 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0); 1936 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0); 1937 } 1938 break; 1939 1940 case NVMET_FCOP_WRITEDATA: 1941 /* Words 0 - 2 : The first sg segment */ 1942 txrdy = pci_pool_alloc(phba->txrdy_payload_pool, 1943 GFP_KERNEL, &physaddr); 1944 if (!txrdy) { 1945 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1946 "6041 Bad txrdy buffer: oxid x%x\n", 1947 ctxp->oxid); 1948 return NULL; 1949 } 1950 ctxp->txrdy = txrdy; 1951 ctxp->txrdy_phys = physaddr; 1952 wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1953 wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN; 1954 wqe->fcp_treceive.bde.addrLow = 1955 cpu_to_le32(putPaddrLow(physaddr)); 1956 wqe->fcp_treceive.bde.addrHigh = 1957 cpu_to_le32(putPaddrHigh(physaddr)); 1958 1959 /* Word 3 */ 1960 wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN; 1961 1962 /* Word 4 */ 1963 wqe->fcp_treceive.relative_offset = ctxp->offset; 1964 1965 /* Word 5 */ 1966 1967 /* Word 6 */ 1968 bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com, 1969 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 1970 bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com, 1971 nvmewqe->sli4_xritag); 1972 1973 /* Word 7 */ 1974 bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0); 1975 bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, 1976 CMD_FCP_TRECEIVE64_WQE); 1977 1978 /* Word 8 */ 1979 wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag; 1980 1981 /* Word 9 */ 1982 bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag); 1983 bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid); 1984 1985 /* Word 10 */ 1986 bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1); 1987 bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1); 1988 bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ); 1989 bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, 1990 LPFC_WQE_LENLOC_WORD12); 1991 bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, xc); 1992 bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0); 1993 bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0); 1994 bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0); 1995 bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1); 1996 if (phba->cfg_nvme_oas) 1997 bf_set(wqe_oas, &wqe->fcp_treceive.wqe_com, 1); 1998 1999 /* Word 11 */ 2000 bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, 2001 LPFC_WQE_CQ_ID_DEFAULT); 2002 bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, 2003 FCP_COMMAND_TRECEIVE); 2004 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0); 2005 2006 /* Word 12 */ 2007 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length; 2008 2009 /* Setup 1 TXRDY and 1 SKIP SGE */ 2010 txrdy[0] = 0; 2011 txrdy[1] = cpu_to_be32(rsp->transfer_length); 2012 txrdy[2] = 0; 2013 2014 sgl->addr_hi = putPaddrHigh(physaddr); 2015 sgl->addr_lo = putPaddrLow(physaddr); 2016 sgl->word2 = 0; 2017 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); 2018 sgl->word2 = cpu_to_le32(sgl->word2); 2019 sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN); 2020 sgl++; 2021 sgl->addr_hi = 0; 2022 sgl->addr_lo = 0; 2023 sgl->word2 = 0; 2024 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); 2025 sgl->word2 = cpu_to_le32(sgl->word2); 2026 sgl->sge_len = 0; 2027 sgl++; 2028 atomic_inc(&tgtp->xmt_fcp_write); 2029 break; 2030 2031 case NVMET_FCOP_RSP: 2032 /* Words 0 - 2 */ 2033 physaddr = rsp->rspdma; 2034 wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; 2035 wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen; 2036 wqe->fcp_trsp.bde.addrLow = 2037 cpu_to_le32(putPaddrLow(physaddr)); 2038 wqe->fcp_trsp.bde.addrHigh = 2039 cpu_to_le32(putPaddrHigh(physaddr)); 2040 2041 /* Word 3 */ 2042 wqe->fcp_trsp.response_len = rsp->rsplen; 2043 2044 /* Word 4 */ 2045 wqe->fcp_trsp.rsvd_4_5[0] = 0; 2046 2047 2048 /* Word 5 */ 2049 2050 /* Word 6 */ 2051 bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com, 2052 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 2053 bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com, 2054 nvmewqe->sli4_xritag); 2055 2056 /* Word 7 */ 2057 bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); 2058 bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE); 2059 2060 /* Word 8 */ 2061 wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag; 2062 2063 /* Word 9 */ 2064 bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag); 2065 bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid); 2066 2067 /* Word 10 */ 2068 bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1); 2069 bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 0); 2070 bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_WRITE); 2071 bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, 2072 LPFC_WQE_LENLOC_WORD3); 2073 bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, xc); 2074 bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1); 2075 if (phba->cfg_nvme_oas) 2076 bf_set(wqe_oas, &wqe->fcp_trsp.wqe_com, 1); 2077 2078 /* Word 11 */ 2079 bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, 2080 LPFC_WQE_CQ_ID_DEFAULT); 2081 bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, 2082 FCP_COMMAND_TRSP); 2083 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0); 2084 2085 if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) { 2086 /* Good response - all zero's on wire */ 2087 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0); 2088 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0); 2089 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0); 2090 } else { 2091 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1); 2092 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1); 2093 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 2094 ((rsp->rsplen >> 2) - 1)); 2095 memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen); 2096 } 2097 2098 /* Use rspbuf, NOT sg list */ 2099 rsp->sg_cnt = 0; 2100 sgl->word2 = 0; 2101 atomic_inc(&tgtp->xmt_fcp_rsp); 2102 break; 2103 2104 default: 2105 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, 2106 "6064 Unknown Rsp Op %d\n", 2107 rsp->op); 2108 return NULL; 2109 } 2110 2111 nvmewqe->retry = 1; 2112 nvmewqe->vport = phba->pport; 2113 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT; 2114 nvmewqe->context1 = ndlp; 2115 2116 for (i = 0; i < rsp->sg_cnt; i++) { 2117 sgel = &rsp->sg[i]; 2118 physaddr = sg_dma_address(sgel); 2119 cnt = sg_dma_len(sgel); 2120 sgl->addr_hi = putPaddrHigh(physaddr); 2121 sgl->addr_lo = putPaddrLow(physaddr); 2122 sgl->word2 = 0; 2123 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); 2124 bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset); 2125 if ((i+1) == rsp->sg_cnt) 2126 bf_set(lpfc_sli4_sge_last, sgl, 1); 2127 sgl->word2 = cpu_to_le32(sgl->word2); 2128 sgl->sge_len = cpu_to_le32(cnt); 2129 sgl++; 2130 ctxp->offset += cnt; 2131 } 2132 ctxp->state = LPFC_NVMET_STE_DATA; 2133 ctxp->entry_cnt++; 2134 return nvmewqe; 2135 } 2136 2137 /** 2138 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS 2139 * @phba: Pointer to HBA context object. 2140 * @cmdwqe: Pointer to driver command WQE object. 2141 * @wcqe: Pointer to driver response CQE object. 2142 * 2143 * The function is called from SLI ring event handler with no 2144 * lock held. This function is the completion handler for NVME ABTS for FCP cmds 2145 * The function frees memory resources used for the NVME commands. 2146 **/ 2147 static void 2148 lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, 2149 struct lpfc_wcqe_complete *wcqe) 2150 { 2151 struct lpfc_nvmet_rcv_ctx *ctxp; 2152 struct lpfc_nvmet_tgtport *tgtp; 2153 uint32_t status, result; 2154 unsigned long flags; 2155 bool released = false; 2156 2157 ctxp = cmdwqe->context2; 2158 status = bf_get(lpfc_wcqe_c_status, wcqe); 2159 result = wcqe->parameter; 2160 2161 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 2162 if (ctxp->flag & LPFC_NVMET_ABORT_OP) 2163 atomic_inc(&tgtp->xmt_fcp_abort_cmpl); 2164 2165 ctxp->state = LPFC_NVMET_STE_DONE; 2166 2167 /* Check if we already received a free context call 2168 * and we have completed processing an abort situation. 2169 */ 2170 spin_lock_irqsave(&ctxp->ctxlock, flags); 2171 if ((ctxp->flag & LPFC_NVMET_CTX_RLS) && 2172 !(ctxp->flag & LPFC_NVMET_XBUSY)) { 2173 list_del(&ctxp->list); 2174 released = true; 2175 } 2176 ctxp->flag &= ~LPFC_NVMET_ABORT_OP; 2177 spin_unlock_irqrestore(&ctxp->ctxlock, flags); 2178 atomic_inc(&tgtp->xmt_abort_rsp); 2179 2180 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 2181 "6165 ABORT cmpl: xri x%x flg x%x (%d) " 2182 "WCQE: %08x %08x %08x %08x\n", 2183 ctxp->oxid, ctxp->flag, released, 2184 wcqe->word0, wcqe->total_data_placed, 2185 result, wcqe->word3); 2186 2187 cmdwqe->context2 = NULL; 2188 cmdwqe->context3 = NULL; 2189 /* 2190 * if transport has released ctx, then can reuse it. Otherwise, 2191 * will be recycled by transport release call. 2192 */ 2193 if (released) 2194 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); 2195 2196 /* This is the iocbq for the abort, not the command */ 2197 lpfc_sli_release_iocbq(phba, cmdwqe); 2198 2199 /* Since iaab/iaar are NOT set, there is no work left. 2200 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted 2201 * should have been called already. 2202 */ 2203 } 2204 2205 /** 2206 * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS 2207 * @phba: Pointer to HBA context object. 2208 * @cmdwqe: Pointer to driver command WQE object. 2209 * @wcqe: Pointer to driver response CQE object. 2210 * 2211 * The function is called from SLI ring event handler with no 2212 * lock held. This function is the completion handler for NVME ABTS for FCP cmds 2213 * The function frees memory resources used for the NVME commands. 2214 **/ 2215 static void 2216 lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, 2217 struct lpfc_wcqe_complete *wcqe) 2218 { 2219 struct lpfc_nvmet_rcv_ctx *ctxp; 2220 struct lpfc_nvmet_tgtport *tgtp; 2221 unsigned long flags; 2222 uint32_t status, result; 2223 bool released = false; 2224 2225 ctxp = cmdwqe->context2; 2226 status = bf_get(lpfc_wcqe_c_status, wcqe); 2227 result = wcqe->parameter; 2228 2229 if (!ctxp) { 2230 /* if context is clear, related io alrady complete */ 2231 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2232 "6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n", 2233 wcqe->word0, wcqe->total_data_placed, 2234 result, wcqe->word3); 2235 return; 2236 } 2237 2238 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 2239 if (ctxp->flag & LPFC_NVMET_ABORT_OP) 2240 atomic_inc(&tgtp->xmt_fcp_abort_cmpl); 2241 2242 /* Sanity check */ 2243 if (ctxp->state != LPFC_NVMET_STE_ABORT) { 2244 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 2245 "6112 ABTS Wrong state:%d oxid x%x\n", 2246 ctxp->state, ctxp->oxid); 2247 } 2248 2249 /* Check if we already received a free context call 2250 * and we have completed processing an abort situation. 2251 */ 2252 ctxp->state = LPFC_NVMET_STE_DONE; 2253 spin_lock_irqsave(&ctxp->ctxlock, flags); 2254 if ((ctxp->flag & LPFC_NVMET_CTX_RLS) && 2255 !(ctxp->flag & LPFC_NVMET_XBUSY)) { 2256 list_del(&ctxp->list); 2257 released = true; 2258 } 2259 ctxp->flag &= ~LPFC_NVMET_ABORT_OP; 2260 spin_unlock_irqrestore(&ctxp->ctxlock, flags); 2261 atomic_inc(&tgtp->xmt_abort_rsp); 2262 2263 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2264 "6316 ABTS cmpl xri x%x flg x%x (%x) " 2265 "WCQE: %08x %08x %08x %08x\n", 2266 ctxp->oxid, ctxp->flag, released, 2267 wcqe->word0, wcqe->total_data_placed, 2268 result, wcqe->word3); 2269 2270 cmdwqe->context2 = NULL; 2271 cmdwqe->context3 = NULL; 2272 /* 2273 * if transport has released ctx, then can reuse it. Otherwise, 2274 * will be recycled by transport release call. 2275 */ 2276 if (released) 2277 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); 2278 2279 /* Since iaab/iaar are NOT set, there is no work left. 2280 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted 2281 * should have been called already. 2282 */ 2283 } 2284 2285 /** 2286 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS 2287 * @phba: Pointer to HBA context object. 2288 * @cmdwqe: Pointer to driver command WQE object. 2289 * @wcqe: Pointer to driver response CQE object. 2290 * 2291 * The function is called from SLI ring event handler with no 2292 * lock held. This function is the completion handler for NVME ABTS for LS cmds 2293 * The function frees memory resources used for the NVME commands. 2294 **/ 2295 static void 2296 lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, 2297 struct lpfc_wcqe_complete *wcqe) 2298 { 2299 struct lpfc_nvmet_rcv_ctx *ctxp; 2300 struct lpfc_nvmet_tgtport *tgtp; 2301 uint32_t status, result; 2302 2303 ctxp = cmdwqe->context2; 2304 status = bf_get(lpfc_wcqe_c_status, wcqe); 2305 result = wcqe->parameter; 2306 2307 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 2308 atomic_inc(&tgtp->xmt_ls_abort_cmpl); 2309 2310 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2311 "6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n", 2312 ctxp, wcqe->word0, wcqe->total_data_placed, 2313 result, wcqe->word3); 2314 2315 if (!ctxp) { 2316 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 2317 "6415 NVMET LS Abort No ctx: WCQE: " 2318 "%08x %08x %08x %08x\n", 2319 wcqe->word0, wcqe->total_data_placed, 2320 result, wcqe->word3); 2321 2322 lpfc_sli_release_iocbq(phba, cmdwqe); 2323 return; 2324 } 2325 2326 if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) { 2327 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 2328 "6416 NVMET LS abort cmpl state mismatch: " 2329 "oxid x%x: %d %d\n", 2330 ctxp->oxid, ctxp->state, ctxp->entry_cnt); 2331 } 2332 2333 cmdwqe->context2 = NULL; 2334 cmdwqe->context3 = NULL; 2335 lpfc_sli_release_iocbq(phba, cmdwqe); 2336 kfree(ctxp); 2337 } 2338 2339 static int 2340 lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba, 2341 struct lpfc_nvmet_rcv_ctx *ctxp, 2342 uint32_t sid, uint16_t xri) 2343 { 2344 struct lpfc_nvmet_tgtport *tgtp; 2345 struct lpfc_iocbq *abts_wqeq; 2346 union lpfc_wqe *wqe_abts; 2347 struct lpfc_nodelist *ndlp; 2348 2349 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2350 "6067 ABTS: sid %x xri x%x/x%x\n", 2351 sid, xri, ctxp->wqeq->sli4_xritag); 2352 2353 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 2354 2355 ndlp = lpfc_findnode_did(phba->pport, sid); 2356 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || 2357 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 2358 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { 2359 atomic_inc(&tgtp->xmt_abort_rsp_error); 2360 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 2361 "6134 Drop ABTS - wrong NDLP state x%x.\n", 2362 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE); 2363 2364 /* No failure to an ABTS request. */ 2365 return 0; 2366 } 2367 2368 abts_wqeq = ctxp->wqeq; 2369 wqe_abts = &abts_wqeq->wqe; 2370 2371 /* 2372 * Since we zero the whole WQE, we need to ensure we set the WQE fields 2373 * that were initialized in lpfc_sli4_nvmet_alloc. 2374 */ 2375 memset(wqe_abts, 0, sizeof(union lpfc_wqe)); 2376 2377 /* Word 5 */ 2378 bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0); 2379 bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1); 2380 bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0); 2381 bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS); 2382 bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS); 2383 2384 /* Word 6 */ 2385 bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com, 2386 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 2387 bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com, 2388 abts_wqeq->sli4_xritag); 2389 2390 /* Word 7 */ 2391 bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com, 2392 CMD_XMIT_SEQUENCE64_WQE); 2393 bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI); 2394 bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3); 2395 bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0); 2396 2397 /* Word 8 */ 2398 wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag; 2399 2400 /* Word 9 */ 2401 bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag); 2402 /* Needs to be set by caller */ 2403 bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri); 2404 2405 /* Word 10 */ 2406 bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1); 2407 bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE); 2408 bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com, 2409 LPFC_WQE_LENLOC_WORD12); 2410 bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0); 2411 bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0); 2412 2413 /* Word 11 */ 2414 bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com, 2415 LPFC_WQE_CQ_ID_DEFAULT); 2416 bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com, 2417 OTHER_COMMAND); 2418 2419 abts_wqeq->vport = phba->pport; 2420 abts_wqeq->context1 = ndlp; 2421 abts_wqeq->context2 = ctxp; 2422 abts_wqeq->context3 = NULL; 2423 abts_wqeq->rsvd2 = 0; 2424 /* hba_wqidx should already be setup from command we are aborting */ 2425 abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR; 2426 abts_wqeq->iocb.ulpLe = 1; 2427 2428 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2429 "6069 Issue ABTS to xri x%x reqtag x%x\n", 2430 xri, abts_wqeq->iotag); 2431 return 1; 2432 } 2433 2434 static int 2435 lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, 2436 struct lpfc_nvmet_rcv_ctx *ctxp, 2437 uint32_t sid, uint16_t xri) 2438 { 2439 struct lpfc_nvmet_tgtport *tgtp; 2440 struct lpfc_iocbq *abts_wqeq; 2441 union lpfc_wqe *abts_wqe; 2442 struct lpfc_nodelist *ndlp; 2443 unsigned long flags; 2444 int rc; 2445 2446 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 2447 if (!ctxp->wqeq) { 2448 ctxp->wqeq = ctxp->ctxbuf->iocbq; 2449 ctxp->wqeq->hba_wqidx = 0; 2450 } 2451 2452 ndlp = lpfc_findnode_did(phba->pport, sid); 2453 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || 2454 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 2455 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) { 2456 atomic_inc(&tgtp->xmt_abort_rsp_error); 2457 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 2458 "6160 Drop ABORT - wrong NDLP state x%x.\n", 2459 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE); 2460 2461 /* No failure to an ABTS request. */ 2462 ctxp->flag &= ~LPFC_NVMET_ABORT_OP; 2463 return 0; 2464 } 2465 2466 /* Issue ABTS for this WQE based on iotag */ 2467 ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba); 2468 if (!ctxp->abort_wqeq) { 2469 atomic_inc(&tgtp->xmt_abort_rsp_error); 2470 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 2471 "6161 ABORT failed: No wqeqs: " 2472 "xri: x%x\n", ctxp->oxid); 2473 /* No failure to an ABTS request. */ 2474 ctxp->flag &= ~LPFC_NVMET_ABORT_OP; 2475 return 0; 2476 } 2477 abts_wqeq = ctxp->abort_wqeq; 2478 abts_wqe = &abts_wqeq->wqe; 2479 ctxp->state = LPFC_NVMET_STE_ABORT; 2480 2481 /* Announce entry to new IO submit field. */ 2482 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, 2483 "6162 ABORT Request to rport DID x%06x " 2484 "for xri x%x x%x\n", 2485 ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag); 2486 2487 /* If the hba is getting reset, this flag is set. It is 2488 * cleared when the reset is complete and rings reestablished. 2489 */ 2490 spin_lock_irqsave(&phba->hbalock, flags); 2491 /* driver queued commands are in process of being flushed */ 2492 if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) { 2493 spin_unlock_irqrestore(&phba->hbalock, flags); 2494 atomic_inc(&tgtp->xmt_abort_rsp_error); 2495 lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 2496 "6163 Driver in reset cleanup - flushing " 2497 "NVME Req now. hba_flag x%x oxid x%x\n", 2498 phba->hba_flag, ctxp->oxid); 2499 lpfc_sli_release_iocbq(phba, abts_wqeq); 2500 ctxp->flag &= ~LPFC_NVMET_ABORT_OP; 2501 return 0; 2502 } 2503 2504 /* Outstanding abort is in progress */ 2505 if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) { 2506 spin_unlock_irqrestore(&phba->hbalock, flags); 2507 atomic_inc(&tgtp->xmt_abort_rsp_error); 2508 lpfc_printf_log(phba, KERN_ERR, LOG_NVME, 2509 "6164 Outstanding NVME I/O Abort Request " 2510 "still pending on oxid x%x\n", 2511 ctxp->oxid); 2512 lpfc_sli_release_iocbq(phba, abts_wqeq); 2513 ctxp->flag &= ~LPFC_NVMET_ABORT_OP; 2514 return 0; 2515 } 2516 2517 /* Ready - mark outstanding as aborted by driver. */ 2518 abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED; 2519 2520 /* WQEs are reused. Clear stale data and set key fields to 2521 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag. 2522 */ 2523 memset(abts_wqe, 0, sizeof(union lpfc_wqe)); 2524 2525 /* word 3 */ 2526 bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG); 2527 2528 /* word 7 */ 2529 bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0); 2530 bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); 2531 2532 /* word 8 - tell the FW to abort the IO associated with this 2533 * outstanding exchange ID. 2534 */ 2535 abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag; 2536 2537 /* word 9 - this is the iotag for the abts_wqe completion. */ 2538 bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com, 2539 abts_wqeq->iotag); 2540 2541 /* word 10 */ 2542 bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1); 2543 bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); 2544 2545 /* word 11 */ 2546 bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND); 2547 bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1); 2548 bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 2549 2550 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 2551 abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx; 2552 abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp; 2553 abts_wqeq->iocb_cmpl = 0; 2554 abts_wqeq->iocb_flag |= LPFC_IO_NVME; 2555 abts_wqeq->context2 = ctxp; 2556 abts_wqeq->vport = phba->pport; 2557 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq); 2558 spin_unlock_irqrestore(&phba->hbalock, flags); 2559 if (rc == WQE_SUCCESS) { 2560 atomic_inc(&tgtp->xmt_abort_sol); 2561 return 0; 2562 } 2563 2564 atomic_inc(&tgtp->xmt_abort_rsp_error); 2565 ctxp->flag &= ~LPFC_NVMET_ABORT_OP; 2566 lpfc_sli_release_iocbq(phba, abts_wqeq); 2567 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 2568 "6166 Failed ABORT issue_wqe with status x%x " 2569 "for oxid x%x.\n", 2570 rc, ctxp->oxid); 2571 return 1; 2572 } 2573 2574 2575 static int 2576 lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, 2577 struct lpfc_nvmet_rcv_ctx *ctxp, 2578 uint32_t sid, uint16_t xri) 2579 { 2580 struct lpfc_nvmet_tgtport *tgtp; 2581 struct lpfc_iocbq *abts_wqeq; 2582 unsigned long flags; 2583 int rc; 2584 2585 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 2586 if (!ctxp->wqeq) { 2587 ctxp->wqeq = ctxp->ctxbuf->iocbq; 2588 ctxp->wqeq->hba_wqidx = 0; 2589 } 2590 2591 if (ctxp->state == LPFC_NVMET_STE_FREE) { 2592 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 2593 "6417 NVMET ABORT ctx freed %d %d oxid x%x\n", 2594 ctxp->state, ctxp->entry_cnt, ctxp->oxid); 2595 rc = WQE_BUSY; 2596 goto aerr; 2597 } 2598 ctxp->state = LPFC_NVMET_STE_ABORT; 2599 ctxp->entry_cnt++; 2600 rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri); 2601 if (rc == 0) 2602 goto aerr; 2603 2604 spin_lock_irqsave(&phba->hbalock, flags); 2605 abts_wqeq = ctxp->wqeq; 2606 abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp; 2607 abts_wqeq->iocb_cmpl = NULL; 2608 abts_wqeq->iocb_flag |= LPFC_IO_NVMET; 2609 rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq); 2610 spin_unlock_irqrestore(&phba->hbalock, flags); 2611 if (rc == WQE_SUCCESS) { 2612 return 0; 2613 } 2614 2615 aerr: 2616 ctxp->flag &= ~LPFC_NVMET_ABORT_OP; 2617 atomic_inc(&tgtp->xmt_abort_rsp_error); 2618 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 2619 "6135 Failed to Issue ABTS for oxid x%x. Status x%x\n", 2620 ctxp->oxid, rc); 2621 return 1; 2622 } 2623 2624 static int 2625 lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba, 2626 struct lpfc_nvmet_rcv_ctx *ctxp, 2627 uint32_t sid, uint16_t xri) 2628 { 2629 struct lpfc_nvmet_tgtport *tgtp; 2630 struct lpfc_iocbq *abts_wqeq; 2631 union lpfc_wqe *wqe_abts; 2632 unsigned long flags; 2633 int rc; 2634 2635 if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) || 2636 (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) { 2637 ctxp->state = LPFC_NVMET_STE_LS_ABORT; 2638 ctxp->entry_cnt++; 2639 } else { 2640 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 2641 "6418 NVMET LS abort state mismatch " 2642 "IO x%x: %d %d\n", 2643 ctxp->oxid, ctxp->state, ctxp->entry_cnt); 2644 ctxp->state = LPFC_NVMET_STE_LS_ABORT; 2645 } 2646 2647 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; 2648 if (!ctxp->wqeq) { 2649 /* Issue ABTS for this WQE based on iotag */ 2650 ctxp->wqeq = lpfc_sli_get_iocbq(phba); 2651 if (!ctxp->wqeq) { 2652 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 2653 "6068 Abort failed: No wqeqs: " 2654 "xri: x%x\n", xri); 2655 /* No failure to an ABTS request. */ 2656 kfree(ctxp); 2657 return 0; 2658 } 2659 } 2660 abts_wqeq = ctxp->wqeq; 2661 wqe_abts = &abts_wqeq->wqe; 2662 2663 if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) { 2664 rc = WQE_BUSY; 2665 goto out; 2666 } 2667 2668 spin_lock_irqsave(&phba->hbalock, flags); 2669 abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp; 2670 abts_wqeq->iocb_cmpl = 0; 2671 abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS; 2672 rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq); 2673 spin_unlock_irqrestore(&phba->hbalock, flags); 2674 if (rc == WQE_SUCCESS) { 2675 atomic_inc(&tgtp->xmt_abort_unsol); 2676 return 0; 2677 } 2678 out: 2679 atomic_inc(&tgtp->xmt_abort_rsp_error); 2680 abts_wqeq->context2 = NULL; 2681 abts_wqeq->context3 = NULL; 2682 lpfc_sli_release_iocbq(phba, abts_wqeq); 2683 kfree(ctxp); 2684 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, 2685 "6056 Failed to Issue ABTS. Status x%x\n", rc); 2686 return 0; 2687 } 2688