1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 *******************************************************************/ 23 #include <linux/pci.h> 24 #include <linux/slab.h> 25 #include <linux/interrupt.h> 26 #include <linux/export.h> 27 #include <linux/delay.h> 28 #include <asm/unaligned.h> 29 #include <linux/t10-pi.h> 30 #include <linux/crc-t10dif.h> 31 #include <linux/blk-cgroup.h> 32 #include <net/checksum.h> 33 34 #include <scsi/scsi.h> 35 #include <scsi/scsi_device.h> 36 #include <scsi/scsi_eh.h> 37 #include <scsi/scsi_host.h> 38 #include <scsi/scsi_tcq.h> 39 #include <scsi/scsi_transport_fc.h> 40 41 #include "lpfc_version.h" 42 #include "lpfc_hw4.h" 43 #include "lpfc_hw.h" 44 #include "lpfc_sli.h" 45 #include "lpfc_sli4.h" 46 #include "lpfc_nl.h" 47 #include "lpfc_disc.h" 48 #include "lpfc.h" 49 #include "lpfc_scsi.h" 50 #include "lpfc_logmsg.h" 51 #include "lpfc_crtn.h" 52 #include "lpfc_vport.h" 53 54 #define LPFC_RESET_WAIT 2 55 #define LPFC_ABORT_WAIT 2 56 57 static char *dif_op_str[] = { 58 "PROT_NORMAL", 59 "PROT_READ_INSERT", 60 "PROT_WRITE_STRIP", 61 "PROT_READ_STRIP", 62 "PROT_WRITE_INSERT", 63 "PROT_READ_PASS", 64 "PROT_WRITE_PASS", 65 }; 66 67 struct scsi_dif_tuple { 68 __be16 guard_tag; /* Checksum */ 69 __be16 app_tag; /* Opaque storage */ 70 __be32 ref_tag; /* Target LBA or indirect LBA */ 71 }; 72 73 static struct lpfc_rport_data * 74 lpfc_rport_data_from_scsi_device(struct scsi_device *sdev) 75 { 76 struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata; 77 78 if (vport->phba->cfg_fof) 79 return ((struct lpfc_device_data *)sdev->hostdata)->rport_data; 80 else 81 return (struct lpfc_rport_data *)sdev->hostdata; 82 } 83 84 static void 85 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb); 86 static void 87 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb); 88 static int 89 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc); 90 91 /** 92 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge. 93 * @phba: Pointer to HBA object. 94 * @lpfc_cmd: lpfc scsi command object pointer. 95 * 96 * This function is called from the lpfc_prep_task_mgmt_cmd function to 97 * set the last bit in the response sge entry. 
98 **/ 99 static void 100 lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba, 101 struct lpfc_io_buf *lpfc_cmd) 102 { 103 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; 104 if (sgl) { 105 sgl += 1; 106 sgl->word2 = le32_to_cpu(sgl->word2); 107 bf_set(lpfc_sli4_sge_last, sgl, 1); 108 sgl->word2 = cpu_to_le32(sgl->word2); 109 } 110 } 111 112 /** 113 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread 114 * @phba: The Hba for which this call is being executed. 115 * 116 * This routine is called when there is resource error in driver or firmware. 117 * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine 118 * posts at most 1 event each second. This routine wakes up worker thread of 119 * @phba to process WORKER_RAM_DOWN_EVENT event. 120 * 121 * This routine should be called with no lock held. 122 **/ 123 void 124 lpfc_rampdown_queue_depth(struct lpfc_hba *phba) 125 { 126 unsigned long flags; 127 uint32_t evt_posted; 128 unsigned long expires; 129 130 spin_lock_irqsave(&phba->hbalock, flags); 131 atomic_inc(&phba->num_rsrc_err); 132 phba->last_rsrc_error_time = jiffies; 133 134 expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL; 135 if (time_after(expires, jiffies)) { 136 spin_unlock_irqrestore(&phba->hbalock, flags); 137 return; 138 } 139 140 phba->last_ramp_down_time = jiffies; 141 142 spin_unlock_irqrestore(&phba->hbalock, flags); 143 144 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 145 evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE; 146 if (!evt_posted) 147 phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE; 148 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); 149 150 if (!evt_posted) 151 lpfc_worker_wake_up(phba); 152 return; 153 } 154 155 /** 156 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler 157 * @phba: The Hba for which this call is being executed. 158 * 159 * This routine is called to process WORKER_RAMP_DOWN_QUEUE event for worker 160 * thread.This routine reduces queue depth for all scsi device on each vport 161 * associated with @phba. 162 **/ 163 void 164 lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) 165 { 166 struct lpfc_vport **vports; 167 struct Scsi_Host *shost; 168 struct scsi_device *sdev; 169 unsigned long new_queue_depth; 170 unsigned long num_rsrc_err; 171 int i; 172 173 num_rsrc_err = atomic_read(&phba->num_rsrc_err); 174 175 /* 176 * The error and success command counters are global per 177 * driver instance. If another handler has already 178 * operated on this error event, just exit. 179 */ 180 if (num_rsrc_err == 0) 181 return; 182 183 vports = lpfc_create_vport_work_array(phba); 184 if (vports != NULL) 185 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 186 shost = lpfc_shost_from_vport(vports[i]); 187 shost_for_each_device(sdev, shost) { 188 if (num_rsrc_err >= sdev->queue_depth) 189 new_queue_depth = 1; 190 else 191 new_queue_depth = sdev->queue_depth - 192 num_rsrc_err; 193 scsi_change_queue_depth(sdev, new_queue_depth); 194 } 195 } 196 lpfc_destroy_vport_work_array(phba, vports); 197 atomic_set(&phba->num_rsrc_err, 0); 198 } 199 200 /** 201 * lpfc_scsi_dev_block - set all scsi hosts to block state 202 * @phba: Pointer to HBA context object. 203 * 204 * This function walks vport list and set each SCSI host to block state 205 * by invoking fc_remote_port_delete() routine. This function is invoked 206 * with EEH when device's PCI slot has been permanently disabled. 
207 **/ 208 void 209 lpfc_scsi_dev_block(struct lpfc_hba *phba) 210 { 211 struct lpfc_vport **vports; 212 struct Scsi_Host *shost; 213 struct scsi_device *sdev; 214 struct fc_rport *rport; 215 int i; 216 217 vports = lpfc_create_vport_work_array(phba); 218 if (vports != NULL) 219 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 220 shost = lpfc_shost_from_vport(vports[i]); 221 shost_for_each_device(sdev, shost) { 222 rport = starget_to_rport(scsi_target(sdev)); 223 fc_remote_port_delete(rport); 224 } 225 } 226 lpfc_destroy_vport_work_array(phba, vports); 227 } 228 229 /** 230 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec 231 * @vport: The virtual port for which this call being executed. 232 * @num_to_alloc: The requested number of buffers to allocate. 233 * 234 * This routine allocates a scsi buffer for device with SLI-3 interface spec, 235 * the scsi buffer contains all the necessary information needed to initiate 236 * a SCSI I/O. The non-DMAable buffer region contains information to build 237 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP, 238 * and the initial BPL. In addition to allocating memory, the FCP CMND and 239 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB. 240 * 241 * Return codes: 242 * int - number of scsi buffers that were allocated. 243 * 0 = failure, less than num_to_alloc is a partial failure. 244 **/ 245 static int 246 lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) 247 { 248 struct lpfc_hba *phba = vport->phba; 249 struct lpfc_io_buf *psb; 250 struct ulp_bde64 *bpl; 251 IOCB_t *iocb; 252 dma_addr_t pdma_phys_fcp_cmd; 253 dma_addr_t pdma_phys_fcp_rsp; 254 dma_addr_t pdma_phys_sgl; 255 uint16_t iotag; 256 int bcnt, bpl_size; 257 258 bpl_size = phba->cfg_sg_dma_buf_size - 259 (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); 260 261 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 262 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n", 263 num_to_alloc, phba->cfg_sg_dma_buf_size, 264 (int)sizeof(struct fcp_cmnd), 265 (int)sizeof(struct fcp_rsp), bpl_size); 266 267 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { 268 psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL); 269 if (!psb) 270 break; 271 272 /* 273 * Get memory from the pci pool to map the virt space to pci 274 * bus space for an I/O. The DMA buffer includes space for the 275 * struct fcp_cmnd, struct fcp_rsp and the number of bde's 276 * necessary to support the sg_tablesize. 277 */ 278 psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool, 279 GFP_KERNEL, &psb->dma_handle); 280 if (!psb->data) { 281 kfree(psb); 282 break; 283 } 284 285 286 /* Allocate iotag for psb->cur_iocbq. */ 287 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); 288 if (iotag == 0) { 289 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 290 psb->data, psb->dma_handle); 291 kfree(psb); 292 break; 293 } 294 psb->cur_iocbq.cmd_flag |= LPFC_IO_FCP; 295 296 psb->fcp_cmnd = psb->data; 297 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd); 298 psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) + 299 sizeof(struct fcp_rsp); 300 301 /* Initialize local short-hand pointers. */ 302 bpl = (struct ulp_bde64 *)psb->dma_sgl; 303 pdma_phys_fcp_cmd = psb->dma_handle; 304 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd); 305 pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) + 306 sizeof(struct fcp_rsp); 307 308 /* 309 * The first two bdes are the FCP_CMD and FCP_RSP. The balance 310 * are sg list bdes. 
Initialize the first two and leave the 311 * rest for queuecommand. 312 */ 313 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd)); 314 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd)); 315 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd); 316 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64; 317 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w); 318 319 /* Setup the physical region for the FCP RSP */ 320 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp)); 321 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp)); 322 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp); 323 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64; 324 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w); 325 326 /* 327 * Since the IOCB for the FCP I/O is built into this 328 * lpfc_scsi_buf, initialize it with all known data now. 329 */ 330 iocb = &psb->cur_iocbq.iocb; 331 iocb->un.fcpi64.bdl.ulpIoTag32 = 0; 332 if ((phba->sli_rev == 3) && 333 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { 334 /* fill in immediate fcp command BDE */ 335 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED; 336 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); 337 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t, 338 unsli3.fcp_ext.icd); 339 iocb->un.fcpi64.bdl.addrHigh = 0; 340 iocb->ulpBdeCount = 0; 341 iocb->ulpLe = 0; 342 /* fill in response BDE */ 343 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = 344 BUFF_TYPE_BDE_64; 345 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize = 346 sizeof(struct fcp_rsp); 347 iocb->unsli3.fcp_ext.rbde.addrLow = 348 putPaddrLow(pdma_phys_fcp_rsp); 349 iocb->unsli3.fcp_ext.rbde.addrHigh = 350 putPaddrHigh(pdma_phys_fcp_rsp); 351 } else { 352 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64; 353 iocb->un.fcpi64.bdl.bdeSize = 354 (2 * sizeof(struct ulp_bde64)); 355 iocb->un.fcpi64.bdl.addrLow = 356 putPaddrLow(pdma_phys_sgl); 357 iocb->un.fcpi64.bdl.addrHigh = 358 putPaddrHigh(pdma_phys_sgl); 359 iocb->ulpBdeCount = 1; 360 iocb->ulpLe = 1; 361 } 362 iocb->ulpClass = CLASS3; 363 psb->status = IOSTAT_SUCCESS; 364 /* Put it back into the SCSI buffer list */ 365 psb->cur_iocbq.io_buf = psb; 366 spin_lock_init(&psb->buf_lock); 367 lpfc_release_scsi_buf_s3(phba, psb); 368 369 } 370 371 return bcnt; 372 } 373 374 /** 375 * lpfc_sli4_vport_delete_fcp_xri_aborted -Remove all ndlp references for vport 376 * @vport: pointer to lpfc vport data structure. 377 * 378 * This routine is invoked by the vport cleanup for deletions and the cleanup 379 * for an ndlp on removal. 380 **/ 381 void 382 lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport) 383 { 384 struct lpfc_hba *phba = vport->phba; 385 struct lpfc_io_buf *psb, *next_psb; 386 struct lpfc_sli4_hdw_queue *qp; 387 unsigned long iflag = 0; 388 int idx; 389 390 if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 391 return; 392 393 spin_lock_irqsave(&phba->hbalock, iflag); 394 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 395 qp = &phba->sli4_hba.hdwq[idx]; 396 397 spin_lock(&qp->abts_io_buf_list_lock); 398 list_for_each_entry_safe(psb, next_psb, 399 &qp->lpfc_abts_io_buf_list, list) { 400 if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME) 401 continue; 402 403 if (psb->rdata && psb->rdata->pnode && 404 psb->rdata->pnode->vport == vport) 405 psb->rdata = NULL; 406 } 407 spin_unlock(&qp->abts_io_buf_list_lock); 408 } 409 spin_unlock_irqrestore(&phba->hbalock, iflag); 410 } 411 412 /** 413 * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort 414 * @phba: pointer to lpfc hba data structure. 415 * @axri: pointer to the fcp xri abort wcqe structure. 
416 * @idx: index into hdwq 417 * 418 * This routine is invoked by the worker thread to process a SLI4 fast-path 419 * FCP or NVME aborted xri. 420 **/ 421 void 422 lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba, 423 struct sli4_wcqe_xri_aborted *axri, int idx) 424 { 425 u16 xri = 0; 426 u16 rxid = 0; 427 struct lpfc_io_buf *psb, *next_psb; 428 struct lpfc_sli4_hdw_queue *qp; 429 unsigned long iflag = 0; 430 struct lpfc_iocbq *iocbq; 431 int i; 432 struct lpfc_nodelist *ndlp; 433 int rrq_empty = 0; 434 struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring; 435 struct scsi_cmnd *cmd; 436 int offline = 0; 437 438 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 439 return; 440 offline = pci_channel_offline(phba->pcidev); 441 if (!offline) { 442 xri = bf_get(lpfc_wcqe_xa_xri, axri); 443 rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); 444 } 445 qp = &phba->sli4_hba.hdwq[idx]; 446 spin_lock_irqsave(&phba->hbalock, iflag); 447 spin_lock(&qp->abts_io_buf_list_lock); 448 list_for_each_entry_safe(psb, next_psb, 449 &qp->lpfc_abts_io_buf_list, list) { 450 if (offline) 451 xri = psb->cur_iocbq.sli4_xritag; 452 if (psb->cur_iocbq.sli4_xritag == xri) { 453 list_del_init(&psb->list); 454 psb->flags &= ~LPFC_SBUF_XBUSY; 455 psb->status = IOSTAT_SUCCESS; 456 if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME) { 457 qp->abts_nvme_io_bufs--; 458 spin_unlock(&qp->abts_io_buf_list_lock); 459 spin_unlock_irqrestore(&phba->hbalock, iflag); 460 if (!offline) { 461 lpfc_sli4_nvme_xri_aborted(phba, axri, 462 psb); 463 return; 464 } 465 lpfc_sli4_nvme_pci_offline_aborted(phba, psb); 466 spin_lock_irqsave(&phba->hbalock, iflag); 467 spin_lock(&qp->abts_io_buf_list_lock); 468 continue; 469 } 470 qp->abts_scsi_io_bufs--; 471 spin_unlock(&qp->abts_io_buf_list_lock); 472 473 if (psb->rdata && psb->rdata->pnode) 474 ndlp = psb->rdata->pnode; 475 else 476 ndlp = NULL; 477 478 rrq_empty = list_empty(&phba->active_rrq_list); 479 spin_unlock_irqrestore(&phba->hbalock, iflag); 480 if (ndlp && !offline) { 481 lpfc_set_rrq_active(phba, ndlp, 482 psb->cur_iocbq.sli4_lxritag, rxid, 1); 483 lpfc_sli4_abts_err_handler(phba, ndlp, axri); 484 } 485 486 if (phba->cfg_fcp_wait_abts_rsp || offline) { 487 spin_lock_irqsave(&psb->buf_lock, iflag); 488 cmd = psb->pCmd; 489 psb->pCmd = NULL; 490 spin_unlock_irqrestore(&psb->buf_lock, iflag); 491 492 /* The sdev is not guaranteed to be valid post 493 * scsi_done upcall. 494 */ 495 if (cmd) 496 scsi_done(cmd); 497 498 /* 499 * We expect there is an abort thread waiting 500 * for command completion wake up the thread. 
501 */ 502 spin_lock_irqsave(&psb->buf_lock, iflag); 503 psb->cur_iocbq.cmd_flag &= 504 ~LPFC_DRIVER_ABORTED; 505 if (psb->waitq) 506 wake_up(psb->waitq); 507 spin_unlock_irqrestore(&psb->buf_lock, iflag); 508 } 509 510 lpfc_release_scsi_buf_s4(phba, psb); 511 if (rrq_empty) 512 lpfc_worker_wake_up(phba); 513 if (!offline) 514 return; 515 spin_lock_irqsave(&phba->hbalock, iflag); 516 spin_lock(&qp->abts_io_buf_list_lock); 517 continue; 518 } 519 } 520 spin_unlock(&qp->abts_io_buf_list_lock); 521 if (!offline) { 522 for (i = 1; i <= phba->sli.last_iotag; i++) { 523 iocbq = phba->sli.iocbq_lookup[i]; 524 525 if (!(iocbq->cmd_flag & LPFC_IO_FCP) || 526 (iocbq->cmd_flag & LPFC_IO_LIBDFC)) 527 continue; 528 if (iocbq->sli4_xritag != xri) 529 continue; 530 psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); 531 psb->flags &= ~LPFC_SBUF_XBUSY; 532 spin_unlock_irqrestore(&phba->hbalock, iflag); 533 if (!list_empty(&pring->txq)) 534 lpfc_worker_wake_up(phba); 535 return; 536 } 537 } 538 spin_unlock_irqrestore(&phba->hbalock, iflag); 539 } 540 541 /** 542 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA 543 * @phba: The HBA for which this call is being executed. 544 * @ndlp: pointer to a node-list data structure. 545 * @cmnd: Pointer to scsi_cmnd data structure. 546 * 547 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list 548 * and returns to caller. 549 * 550 * Return codes: 551 * NULL - Error 552 * Pointer to lpfc_scsi_buf - Success 553 **/ 554 static struct lpfc_io_buf * 555 lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 556 struct scsi_cmnd *cmnd) 557 { 558 struct lpfc_io_buf *lpfc_cmd = NULL; 559 struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get; 560 unsigned long iflag = 0; 561 562 spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag); 563 list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf, 564 list); 565 if (!lpfc_cmd) { 566 spin_lock(&phba->scsi_buf_list_put_lock); 567 list_splice(&phba->lpfc_scsi_buf_list_put, 568 &phba->lpfc_scsi_buf_list_get); 569 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 570 list_remove_head(scsi_buf_list_get, lpfc_cmd, 571 struct lpfc_io_buf, list); 572 spin_unlock(&phba->scsi_buf_list_put_lock); 573 } 574 spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag); 575 576 if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) { 577 atomic_inc(&ndlp->cmd_pending); 578 lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH; 579 } 580 return lpfc_cmd; 581 } 582 /** 583 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA 584 * @phba: The HBA for which this call is being executed. 585 * @ndlp: pointer to a node-list data structure. 586 * @cmnd: Pointer to scsi_cmnd data structure. 587 * 588 * This routine removes a scsi buffer from head of @hdwq io_buf_list 589 * and returns to caller. 
590 * 591 * Return codes: 592 * NULL - Error 593 * Pointer to lpfc_scsi_buf - Success 594 **/ 595 static struct lpfc_io_buf * 596 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 597 struct scsi_cmnd *cmnd) 598 { 599 struct lpfc_io_buf *lpfc_cmd; 600 struct lpfc_sli4_hdw_queue *qp; 601 struct sli4_sge *sgl; 602 dma_addr_t pdma_phys_fcp_rsp; 603 dma_addr_t pdma_phys_fcp_cmd; 604 uint32_t cpu, idx; 605 int tag; 606 struct fcp_cmd_rsp_buf *tmp = NULL; 607 608 cpu = raw_smp_processor_id(); 609 if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) { 610 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd)); 611 idx = blk_mq_unique_tag_to_hwq(tag); 612 } else { 613 idx = phba->sli4_hba.cpu_map[cpu].hdwq; 614 } 615 616 lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx, 617 !phba->cfg_xri_rebalancing); 618 if (!lpfc_cmd) { 619 qp = &phba->sli4_hba.hdwq[idx]; 620 qp->empty_io_bufs++; 621 return NULL; 622 } 623 624 /* Setup key fields in buffer that may have been changed 625 * if other protocols used this buffer. 626 */ 627 lpfc_cmd->cur_iocbq.cmd_flag = LPFC_IO_FCP; 628 lpfc_cmd->prot_seg_cnt = 0; 629 lpfc_cmd->seg_cnt = 0; 630 lpfc_cmd->timeout = 0; 631 lpfc_cmd->flags = 0; 632 lpfc_cmd->start_time = jiffies; 633 lpfc_cmd->waitq = NULL; 634 lpfc_cmd->cpu = cpu; 635 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 636 lpfc_cmd->prot_data_type = 0; 637 #endif 638 tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd); 639 if (!tmp) { 640 lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq); 641 return NULL; 642 } 643 644 lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd; 645 lpfc_cmd->fcp_rsp = tmp->fcp_rsp; 646 647 /* 648 * The first two SGEs are the FCP_CMD and FCP_RSP. 649 * The balance are sg list bdes. Initialize the 650 * first two and leave the rest for queuecommand. 651 */ 652 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; 653 pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle; 654 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd)); 655 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd)); 656 sgl->word2 = le32_to_cpu(sgl->word2); 657 bf_set(lpfc_sli4_sge_last, sgl, 0); 658 sgl->word2 = cpu_to_le32(sgl->word2); 659 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd)); 660 sgl++; 661 662 /* Setup the physical region for the FCP RSP */ 663 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd); 664 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp)); 665 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp)); 666 sgl->word2 = le32_to_cpu(sgl->word2); 667 bf_set(lpfc_sli4_sge_last, sgl, 1); 668 sgl->word2 = cpu_to_le32(sgl->word2); 669 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp)); 670 671 if (lpfc_ndlp_check_qdepth(phba, ndlp)) { 672 atomic_inc(&ndlp->cmd_pending); 673 lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH; 674 } 675 return lpfc_cmd; 676 } 677 /** 678 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA 679 * @phba: The HBA for which this call is being executed. 680 * @ndlp: pointer to a node-list data structure. 681 * @cmnd: Pointer to scsi_cmnd data structure. 682 * 683 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list 684 * and returns to caller. 
685 * 686 * Return codes: 687 * NULL - Error 688 * Pointer to lpfc_scsi_buf - Success 689 **/ 690 static struct lpfc_io_buf* 691 lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 692 struct scsi_cmnd *cmnd) 693 { 694 return phba->lpfc_get_scsi_buf(phba, ndlp, cmnd); 695 } 696 697 /** 698 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list 699 * @phba: The Hba for which this call is being executed. 700 * @psb: The scsi buffer which is being released. 701 * 702 * This routine releases @psb scsi buffer by adding it to tail of @phba 703 * lpfc_scsi_buf_list list. 704 **/ 705 static void 706 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb) 707 { 708 unsigned long iflag = 0; 709 710 psb->seg_cnt = 0; 711 psb->prot_seg_cnt = 0; 712 713 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); 714 psb->pCmd = NULL; 715 psb->cur_iocbq.cmd_flag = LPFC_IO_FCP; 716 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put); 717 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); 718 } 719 720 /** 721 * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list. 722 * @phba: The Hba for which this call is being executed. 723 * @psb: The scsi buffer which is being released. 724 * 725 * This routine releases @psb scsi buffer by adding it to tail of @hdwq 726 * io_buf_list list. For SLI4 XRI's are tied to the scsi buffer 727 * and cannot be reused for at least RA_TOV amount of time if it was 728 * aborted. 729 **/ 730 static void 731 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb) 732 { 733 struct lpfc_sli4_hdw_queue *qp; 734 unsigned long iflag = 0; 735 736 psb->seg_cnt = 0; 737 psb->prot_seg_cnt = 0; 738 739 qp = psb->hdwq; 740 if (psb->flags & LPFC_SBUF_XBUSY) { 741 spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag); 742 if (!phba->cfg_fcp_wait_abts_rsp) 743 psb->pCmd = NULL; 744 list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list); 745 qp->abts_scsi_io_bufs++; 746 spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag); 747 } else { 748 lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp); 749 } 750 } 751 752 /** 753 * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list. 754 * @phba: The Hba for which this call is being executed. 755 * @psb: The scsi buffer which is being released. 756 * 757 * This routine releases @psb scsi buffer by adding it to tail of @phba 758 * lpfc_scsi_buf_list list. 759 **/ 760 static void 761 lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb) 762 { 763 if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp) 764 atomic_dec(&psb->ndlp->cmd_pending); 765 766 psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH; 767 phba->lpfc_release_scsi_buf(phba, psb); 768 } 769 770 /** 771 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB 772 * @data: A pointer to the immediate command data portion of the IOCB. 773 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer. 774 * 775 * The routine copies the entire FCP command from @fcp_cmnd to @data while 776 * byte swapping the data to big endian format for transmission on the wire. 
777 **/ 778 static void 779 lpfc_fcpcmd_to_iocb(u8 *data, struct fcp_cmnd *fcp_cmnd) 780 { 781 int i, j; 782 783 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd); 784 i += sizeof(uint32_t), j++) { 785 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]); 786 } 787 } 788 789 /** 790 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec 791 * @phba: The Hba for which this call is being executed. 792 * @lpfc_cmd: The scsi buffer which is going to be mapped. 793 * 794 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd 795 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans 796 * through sg elements and format the bde. This routine also initializes all 797 * IOCB fields which are dependent on scsi command request buffer. 798 * 799 * Return codes: 800 * 1 - Error 801 * 0 - Success 802 **/ 803 static int 804 lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) 805 { 806 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 807 struct scatterlist *sgel = NULL; 808 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 809 struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl; 810 struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq; 811 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 812 struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde; 813 dma_addr_t physaddr; 814 uint32_t num_bde = 0; 815 int nseg, datadir = scsi_cmnd->sc_data_direction; 816 817 /* 818 * There are three possibilities here - use scatter-gather segment, use 819 * the single mapping, or neither. Start the lpfc command prep by 820 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first 821 * data bde entry. 822 */ 823 bpl += 2; 824 if (scsi_sg_count(scsi_cmnd)) { 825 /* 826 * The driver stores the segment count returned from dma_map_sg 827 * because this a count of dma-mappings used to map the use_sg 828 * pages. They are not guaranteed to be the same for those 829 * architectures that implement an IOMMU. 830 */ 831 832 nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd), 833 scsi_sg_count(scsi_cmnd), datadir); 834 if (unlikely(!nseg)) 835 return 1; 836 837 lpfc_cmd->seg_cnt = nseg; 838 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 839 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 840 "9064 BLKGRD: %s: Too many sg segments" 841 " from dma_map_sg. Config %d, seg_cnt" 842 " %d\n", __func__, phba->cfg_sg_seg_cnt, 843 lpfc_cmd->seg_cnt); 844 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); 845 lpfc_cmd->seg_cnt = 0; 846 scsi_dma_unmap(scsi_cmnd); 847 return 2; 848 } 849 850 /* 851 * The driver established a maximum scatter-gather segment count 852 * during probe that limits the number of sg elements in any 853 * single scsi command. Just run through the seg_cnt and format 854 * the bde's. 855 * When using SLI-3 the driver will try to fit all the BDEs into 856 * the IOCB. If it can't then the BDEs get added to a BPL as it 857 * does for SLI-2 mode. 
858 */ 859 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) { 860 physaddr = sg_dma_address(sgel); 861 if (phba->sli_rev == 3 && 862 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 863 !(iocbq->cmd_flag & DSS_SECURITY_OP) && 864 nseg <= LPFC_EXT_DATA_BDE_COUNT) { 865 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 866 data_bde->tus.f.bdeSize = sg_dma_len(sgel); 867 data_bde->addrLow = putPaddrLow(physaddr); 868 data_bde->addrHigh = putPaddrHigh(physaddr); 869 data_bde++; 870 } else { 871 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 872 bpl->tus.f.bdeSize = sg_dma_len(sgel); 873 bpl->tus.w = le32_to_cpu(bpl->tus.w); 874 bpl->addrLow = 875 le32_to_cpu(putPaddrLow(physaddr)); 876 bpl->addrHigh = 877 le32_to_cpu(putPaddrHigh(physaddr)); 878 bpl++; 879 } 880 } 881 } 882 883 /* 884 * Finish initializing those IOCB fields that are dependent on the 885 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is 886 * explicitly reinitialized and for SLI-3 the extended bde count is 887 * explicitly reinitialized since all iocb memory resources are reused. 888 */ 889 if (phba->sli_rev == 3 && 890 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 891 !(iocbq->cmd_flag & DSS_SECURITY_OP)) { 892 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) { 893 /* 894 * The extended IOCB format can only fit 3 BDE or a BPL. 895 * This I/O has more than 3 BDE so the 1st data bde will 896 * be a BPL that is filled in here. 897 */ 898 physaddr = lpfc_cmd->dma_handle; 899 data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64; 900 data_bde->tus.f.bdeSize = (num_bde * 901 sizeof(struct ulp_bde64)); 902 physaddr += (sizeof(struct fcp_cmnd) + 903 sizeof(struct fcp_rsp) + 904 (2 * sizeof(struct ulp_bde64))); 905 data_bde->addrHigh = putPaddrHigh(physaddr); 906 data_bde->addrLow = putPaddrLow(physaddr); 907 /* ebde count includes the response bde and data bpl */ 908 iocb_cmd->unsli3.fcp_ext.ebde_count = 2; 909 } else { 910 /* ebde count includes the response bde and data bdes */ 911 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1); 912 } 913 } else { 914 iocb_cmd->un.fcpi64.bdl.bdeSize = 915 ((num_bde + 2) * sizeof(struct ulp_bde64)); 916 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1); 917 } 918 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); 919 920 /* 921 * Due to difference in data length between DIF/non-DIF paths, 922 * we need to set word 4 of IOCB here 923 */ 924 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd); 925 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd); 926 return 0; 927 } 928 929 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 930 931 /* Return BG_ERR_INIT if error injection is detected by Initiator */ 932 #define BG_ERR_INIT 0x1 933 /* Return BG_ERR_TGT if error injection is detected by Target */ 934 #define BG_ERR_TGT 0x2 935 /* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */ 936 #define BG_ERR_SWAP 0x10 937 /* 938 * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for 939 * error injection 940 */ 941 #define BG_ERR_CHECK 0x20 942 943 /** 944 * lpfc_bg_err_inject - Determine if we should inject an error 945 * @phba: The Hba for which this call is being executed. 
946 * @sc: The SCSI command to examine 947 * @reftag: (out) BlockGuard reference tag for transmitted data 948 * @apptag: (out) BlockGuard application tag for transmitted data 949 * @new_guard: (in) Value to replace CRC with if needed 950 * 951 * Returns BG_ERR_* bit mask or 0 if request ignored 952 **/ 953 static int 954 lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, 955 uint32_t *reftag, uint16_t *apptag, uint32_t new_guard) 956 { 957 struct scatterlist *sgpe; /* s/g prot entry */ 958 struct lpfc_io_buf *lpfc_cmd = NULL; 959 struct scsi_dif_tuple *src = NULL; 960 struct lpfc_nodelist *ndlp; 961 struct lpfc_rport_data *rdata; 962 uint32_t op = scsi_get_prot_op(sc); 963 uint32_t blksize; 964 uint32_t numblks; 965 u32 lba; 966 int rc = 0; 967 int blockoff = 0; 968 969 if (op == SCSI_PROT_NORMAL) 970 return 0; 971 972 sgpe = scsi_prot_sglist(sc); 973 lba = scsi_prot_ref_tag(sc); 974 975 /* First check if we need to match the LBA */ 976 if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) { 977 blksize = scsi_prot_interval(sc); 978 numblks = (scsi_bufflen(sc) + blksize - 1) / blksize; 979 980 /* Make sure we have the right LBA if one is specified */ 981 if (phba->lpfc_injerr_lba < (u64)lba || 982 (phba->lpfc_injerr_lba >= (u64)(lba + numblks))) 983 return 0; 984 if (sgpe) { 985 blockoff = phba->lpfc_injerr_lba - (u64)lba; 986 numblks = sg_dma_len(sgpe) / 987 sizeof(struct scsi_dif_tuple); 988 if (numblks < blockoff) 989 blockoff = numblks; 990 } 991 } 992 993 /* Next check if we need to match the remote NPortID or WWPN */ 994 rdata = lpfc_rport_data_from_scsi_device(sc->device); 995 if (rdata && rdata->pnode) { 996 ndlp = rdata->pnode; 997 998 /* Make sure we have the right NPortID if one is specified */ 999 if (phba->lpfc_injerr_nportid && 1000 (phba->lpfc_injerr_nportid != ndlp->nlp_DID)) 1001 return 0; 1002 1003 /* 1004 * Make sure we have the right WWPN if one is specified. 1005 * wwn[0] should be a non-zero NAA in a good WWPN. 1006 */ 1007 if (phba->lpfc_injerr_wwpn.u.wwn[0] && 1008 (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn, 1009 sizeof(struct lpfc_name)) != 0)) 1010 return 0; 1011 } 1012 1013 /* Setup a ptr to the protection data if the SCSI host provides it */ 1014 if (sgpe) { 1015 src = (struct scsi_dif_tuple *)sg_virt(sgpe); 1016 src += blockoff; 1017 lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble; 1018 } 1019 1020 /* Should we change the Reference Tag */ 1021 if (reftag) { 1022 if (phba->lpfc_injerr_wref_cnt) { 1023 switch (op) { 1024 case SCSI_PROT_WRITE_PASS: 1025 if (src) { 1026 /* 1027 * For WRITE_PASS, force the error 1028 * to be sent on the wire. It should 1029 * be detected by the Target. 1030 * If blockoff != 0 error will be 1031 * inserted in middle of the IO. 1032 */ 1033 1034 lpfc_printf_log(phba, KERN_ERR, 1035 LOG_TRACE_EVENT, 1036 "9076 BLKGRD: Injecting reftag error: " 1037 "write lba x%lx + x%x oldrefTag x%x\n", 1038 (unsigned long)lba, blockoff, 1039 be32_to_cpu(src->ref_tag)); 1040 1041 /* 1042 * Save the old ref_tag so we can 1043 * restore it on completion. 
1044 */ 1045 if (lpfc_cmd) { 1046 lpfc_cmd->prot_data_type = 1047 LPFC_INJERR_REFTAG; 1048 lpfc_cmd->prot_data_segment = 1049 src; 1050 lpfc_cmd->prot_data = 1051 src->ref_tag; 1052 } 1053 src->ref_tag = cpu_to_be32(0xDEADBEEF); 1054 phba->lpfc_injerr_wref_cnt--; 1055 if (phba->lpfc_injerr_wref_cnt == 0) { 1056 phba->lpfc_injerr_nportid = 0; 1057 phba->lpfc_injerr_lba = 1058 LPFC_INJERR_LBA_OFF; 1059 memset(&phba->lpfc_injerr_wwpn, 1060 0, sizeof(struct lpfc_name)); 1061 } 1062 rc = BG_ERR_TGT | BG_ERR_CHECK; 1063 1064 break; 1065 } 1066 fallthrough; 1067 case SCSI_PROT_WRITE_INSERT: 1068 /* 1069 * For WRITE_INSERT, force the error 1070 * to be sent on the wire. It should be 1071 * detected by the Target. 1072 */ 1073 /* DEADBEEF will be the reftag on the wire */ 1074 *reftag = 0xDEADBEEF; 1075 phba->lpfc_injerr_wref_cnt--; 1076 if (phba->lpfc_injerr_wref_cnt == 0) { 1077 phba->lpfc_injerr_nportid = 0; 1078 phba->lpfc_injerr_lba = 1079 LPFC_INJERR_LBA_OFF; 1080 memset(&phba->lpfc_injerr_wwpn, 1081 0, sizeof(struct lpfc_name)); 1082 } 1083 rc = BG_ERR_TGT | BG_ERR_CHECK; 1084 1085 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1086 "9078 BLKGRD: Injecting reftag error: " 1087 "write lba x%lx\n", (unsigned long)lba); 1088 break; 1089 case SCSI_PROT_WRITE_STRIP: 1090 /* 1091 * For WRITE_STRIP and WRITE_PASS, 1092 * force the error on data 1093 * being copied from SLI-Host to SLI-Port. 1094 */ 1095 *reftag = 0xDEADBEEF; 1096 phba->lpfc_injerr_wref_cnt--; 1097 if (phba->lpfc_injerr_wref_cnt == 0) { 1098 phba->lpfc_injerr_nportid = 0; 1099 phba->lpfc_injerr_lba = 1100 LPFC_INJERR_LBA_OFF; 1101 memset(&phba->lpfc_injerr_wwpn, 1102 0, sizeof(struct lpfc_name)); 1103 } 1104 rc = BG_ERR_INIT; 1105 1106 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1107 "9077 BLKGRD: Injecting reftag error: " 1108 "write lba x%lx\n", (unsigned long)lba); 1109 break; 1110 } 1111 } 1112 if (phba->lpfc_injerr_rref_cnt) { 1113 switch (op) { 1114 case SCSI_PROT_READ_INSERT: 1115 case SCSI_PROT_READ_STRIP: 1116 case SCSI_PROT_READ_PASS: 1117 /* 1118 * For READ_STRIP and READ_PASS, force the 1119 * error on data being read off the wire. It 1120 * should force an IO error to the driver. 1121 */ 1122 *reftag = 0xDEADBEEF; 1123 phba->lpfc_injerr_rref_cnt--; 1124 if (phba->lpfc_injerr_rref_cnt == 0) { 1125 phba->lpfc_injerr_nportid = 0; 1126 phba->lpfc_injerr_lba = 1127 LPFC_INJERR_LBA_OFF; 1128 memset(&phba->lpfc_injerr_wwpn, 1129 0, sizeof(struct lpfc_name)); 1130 } 1131 rc = BG_ERR_INIT; 1132 1133 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1134 "9079 BLKGRD: Injecting reftag error: " 1135 "read lba x%lx\n", (unsigned long)lba); 1136 break; 1137 } 1138 } 1139 } 1140 1141 /* Should we change the Application Tag */ 1142 if (apptag) { 1143 if (phba->lpfc_injerr_wapp_cnt) { 1144 switch (op) { 1145 case SCSI_PROT_WRITE_PASS: 1146 if (src) { 1147 /* 1148 * For WRITE_PASS, force the error 1149 * to be sent on the wire. It should 1150 * be detected by the Target. 1151 * If blockoff != 0 error will be 1152 * inserted in middle of the IO. 1153 */ 1154 1155 lpfc_printf_log(phba, KERN_ERR, 1156 LOG_TRACE_EVENT, 1157 "9080 BLKGRD: Injecting apptag error: " 1158 "write lba x%lx + x%x oldappTag x%x\n", 1159 (unsigned long)lba, blockoff, 1160 be16_to_cpu(src->app_tag)); 1161 1162 /* 1163 * Save the old app_tag so we can 1164 * restore it on completion. 
1165 */ 1166 if (lpfc_cmd) { 1167 lpfc_cmd->prot_data_type = 1168 LPFC_INJERR_APPTAG; 1169 lpfc_cmd->prot_data_segment = 1170 src; 1171 lpfc_cmd->prot_data = 1172 src->app_tag; 1173 } 1174 src->app_tag = cpu_to_be16(0xDEAD); 1175 phba->lpfc_injerr_wapp_cnt--; 1176 if (phba->lpfc_injerr_wapp_cnt == 0) { 1177 phba->lpfc_injerr_nportid = 0; 1178 phba->lpfc_injerr_lba = 1179 LPFC_INJERR_LBA_OFF; 1180 memset(&phba->lpfc_injerr_wwpn, 1181 0, sizeof(struct lpfc_name)); 1182 } 1183 rc = BG_ERR_TGT | BG_ERR_CHECK; 1184 break; 1185 } 1186 fallthrough; 1187 case SCSI_PROT_WRITE_INSERT: 1188 /* 1189 * For WRITE_INSERT, force the 1190 * error to be sent on the wire. It should be 1191 * detected by the Target. 1192 */ 1193 /* DEAD will be the apptag on the wire */ 1194 *apptag = 0xDEAD; 1195 phba->lpfc_injerr_wapp_cnt--; 1196 if (phba->lpfc_injerr_wapp_cnt == 0) { 1197 phba->lpfc_injerr_nportid = 0; 1198 phba->lpfc_injerr_lba = 1199 LPFC_INJERR_LBA_OFF; 1200 memset(&phba->lpfc_injerr_wwpn, 1201 0, sizeof(struct lpfc_name)); 1202 } 1203 rc = BG_ERR_TGT | BG_ERR_CHECK; 1204 1205 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1206 "0813 BLKGRD: Injecting apptag error: " 1207 "write lba x%lx\n", (unsigned long)lba); 1208 break; 1209 case SCSI_PROT_WRITE_STRIP: 1210 /* 1211 * For WRITE_STRIP and WRITE_PASS, 1212 * force the error on data 1213 * being copied from SLI-Host to SLI-Port. 1214 */ 1215 *apptag = 0xDEAD; 1216 phba->lpfc_injerr_wapp_cnt--; 1217 if (phba->lpfc_injerr_wapp_cnt == 0) { 1218 phba->lpfc_injerr_nportid = 0; 1219 phba->lpfc_injerr_lba = 1220 LPFC_INJERR_LBA_OFF; 1221 memset(&phba->lpfc_injerr_wwpn, 1222 0, sizeof(struct lpfc_name)); 1223 } 1224 rc = BG_ERR_INIT; 1225 1226 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1227 "0812 BLKGRD: Injecting apptag error: " 1228 "write lba x%lx\n", (unsigned long)lba); 1229 break; 1230 } 1231 } 1232 if (phba->lpfc_injerr_rapp_cnt) { 1233 switch (op) { 1234 case SCSI_PROT_READ_INSERT: 1235 case SCSI_PROT_READ_STRIP: 1236 case SCSI_PROT_READ_PASS: 1237 /* 1238 * For READ_STRIP and READ_PASS, force the 1239 * error on data being read off the wire. It 1240 * should force an IO error to the driver. 1241 */ 1242 *apptag = 0xDEAD; 1243 phba->lpfc_injerr_rapp_cnt--; 1244 if (phba->lpfc_injerr_rapp_cnt == 0) { 1245 phba->lpfc_injerr_nportid = 0; 1246 phba->lpfc_injerr_lba = 1247 LPFC_INJERR_LBA_OFF; 1248 memset(&phba->lpfc_injerr_wwpn, 1249 0, sizeof(struct lpfc_name)); 1250 } 1251 rc = BG_ERR_INIT; 1252 1253 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1254 "0814 BLKGRD: Injecting apptag error: " 1255 "read lba x%lx\n", (unsigned long)lba); 1256 break; 1257 } 1258 } 1259 } 1260 1261 1262 /* Should we change the Guard Tag */ 1263 if (new_guard) { 1264 if (phba->lpfc_injerr_wgrd_cnt) { 1265 switch (op) { 1266 case SCSI_PROT_WRITE_PASS: 1267 rc = BG_ERR_CHECK; 1268 fallthrough; 1269 1270 case SCSI_PROT_WRITE_INSERT: 1271 /* 1272 * For WRITE_INSERT, force the 1273 * error to be sent on the wire. It should be 1274 * detected by the Target. 
1275 */ 1276 phba->lpfc_injerr_wgrd_cnt--; 1277 if (phba->lpfc_injerr_wgrd_cnt == 0) { 1278 phba->lpfc_injerr_nportid = 0; 1279 phba->lpfc_injerr_lba = 1280 LPFC_INJERR_LBA_OFF; 1281 memset(&phba->lpfc_injerr_wwpn, 1282 0, sizeof(struct lpfc_name)); 1283 } 1284 1285 rc |= BG_ERR_TGT | BG_ERR_SWAP; 1286 /* Signals the caller to swap CRC->CSUM */ 1287 1288 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1289 "0817 BLKGRD: Injecting guard error: " 1290 "write lba x%lx\n", (unsigned long)lba); 1291 break; 1292 case SCSI_PROT_WRITE_STRIP: 1293 /* 1294 * For WRITE_STRIP and WRITE_PASS, 1295 * force the error on data 1296 * being copied from SLI-Host to SLI-Port. 1297 */ 1298 phba->lpfc_injerr_wgrd_cnt--; 1299 if (phba->lpfc_injerr_wgrd_cnt == 0) { 1300 phba->lpfc_injerr_nportid = 0; 1301 phba->lpfc_injerr_lba = 1302 LPFC_INJERR_LBA_OFF; 1303 memset(&phba->lpfc_injerr_wwpn, 1304 0, sizeof(struct lpfc_name)); 1305 } 1306 1307 rc = BG_ERR_INIT | BG_ERR_SWAP; 1308 /* Signals the caller to swap CRC->CSUM */ 1309 1310 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1311 "0816 BLKGRD: Injecting guard error: " 1312 "write lba x%lx\n", (unsigned long)lba); 1313 break; 1314 } 1315 } 1316 if (phba->lpfc_injerr_rgrd_cnt) { 1317 switch (op) { 1318 case SCSI_PROT_READ_INSERT: 1319 case SCSI_PROT_READ_STRIP: 1320 case SCSI_PROT_READ_PASS: 1321 /* 1322 * For READ_STRIP and READ_PASS, force the 1323 * error on data being read off the wire. It 1324 * should force an IO error to the driver. 1325 */ 1326 phba->lpfc_injerr_rgrd_cnt--; 1327 if (phba->lpfc_injerr_rgrd_cnt == 0) { 1328 phba->lpfc_injerr_nportid = 0; 1329 phba->lpfc_injerr_lba = 1330 LPFC_INJERR_LBA_OFF; 1331 memset(&phba->lpfc_injerr_wwpn, 1332 0, sizeof(struct lpfc_name)); 1333 } 1334 1335 rc = BG_ERR_INIT | BG_ERR_SWAP; 1336 /* Signals the caller to swap CRC->CSUM */ 1337 1338 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1339 "0818 BLKGRD: Injecting guard error: " 1340 "read lba x%lx\n", (unsigned long)lba); 1341 } 1342 } 1343 } 1344 1345 return rc; 1346 } 1347 #endif 1348 1349 /** 1350 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with 1351 * the specified SCSI command. 1352 * @phba: The Hba for which this call is being executed. 
1353 * @sc: The SCSI command to examine 1354 * @txop: (out) BlockGuard operation for transmitted data 1355 * @rxop: (out) BlockGuard operation for received data 1356 * 1357 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined 1358 * 1359 **/ 1360 static int 1361 lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1362 uint8_t *txop, uint8_t *rxop) 1363 { 1364 uint8_t ret = 0; 1365 1366 if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) { 1367 switch (scsi_get_prot_op(sc)) { 1368 case SCSI_PROT_READ_INSERT: 1369 case SCSI_PROT_WRITE_STRIP: 1370 *rxop = BG_OP_IN_NODIF_OUT_CSUM; 1371 *txop = BG_OP_IN_CSUM_OUT_NODIF; 1372 break; 1373 1374 case SCSI_PROT_READ_STRIP: 1375 case SCSI_PROT_WRITE_INSERT: 1376 *rxop = BG_OP_IN_CRC_OUT_NODIF; 1377 *txop = BG_OP_IN_NODIF_OUT_CRC; 1378 break; 1379 1380 case SCSI_PROT_READ_PASS: 1381 case SCSI_PROT_WRITE_PASS: 1382 *rxop = BG_OP_IN_CRC_OUT_CSUM; 1383 *txop = BG_OP_IN_CSUM_OUT_CRC; 1384 break; 1385 1386 case SCSI_PROT_NORMAL: 1387 default: 1388 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1389 "9063 BLKGRD: Bad op/guard:%d/IP combination\n", 1390 scsi_get_prot_op(sc)); 1391 ret = 1; 1392 break; 1393 1394 } 1395 } else { 1396 switch (scsi_get_prot_op(sc)) { 1397 case SCSI_PROT_READ_STRIP: 1398 case SCSI_PROT_WRITE_INSERT: 1399 *rxop = BG_OP_IN_CRC_OUT_NODIF; 1400 *txop = BG_OP_IN_NODIF_OUT_CRC; 1401 break; 1402 1403 case SCSI_PROT_READ_PASS: 1404 case SCSI_PROT_WRITE_PASS: 1405 *rxop = BG_OP_IN_CRC_OUT_CRC; 1406 *txop = BG_OP_IN_CRC_OUT_CRC; 1407 break; 1408 1409 case SCSI_PROT_READ_INSERT: 1410 case SCSI_PROT_WRITE_STRIP: 1411 *rxop = BG_OP_IN_NODIF_OUT_CRC; 1412 *txop = BG_OP_IN_CRC_OUT_NODIF; 1413 break; 1414 1415 case SCSI_PROT_NORMAL: 1416 default: 1417 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1418 "9075 BLKGRD: Bad op/guard:%d/CRC combination\n", 1419 scsi_get_prot_op(sc)); 1420 ret = 1; 1421 break; 1422 } 1423 } 1424 1425 return ret; 1426 } 1427 1428 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1429 /** 1430 * lpfc_bg_err_opcodes - reDetermine the BlockGuard opcodes to be used with 1431 * the specified SCSI command in order to force a guard tag error. 1432 * @phba: The Hba for which this call is being executed. 
1433 * @sc: The SCSI command to examine 1434 * @txop: (out) BlockGuard operation for transmitted data 1435 * @rxop: (out) BlockGuard operation for received data 1436 * 1437 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined 1438 * 1439 **/ 1440 static int 1441 lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1442 uint8_t *txop, uint8_t *rxop) 1443 { 1444 1445 if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) { 1446 switch (scsi_get_prot_op(sc)) { 1447 case SCSI_PROT_READ_INSERT: 1448 case SCSI_PROT_WRITE_STRIP: 1449 *rxop = BG_OP_IN_NODIF_OUT_CRC; 1450 *txop = BG_OP_IN_CRC_OUT_NODIF; 1451 break; 1452 1453 case SCSI_PROT_READ_STRIP: 1454 case SCSI_PROT_WRITE_INSERT: 1455 *rxop = BG_OP_IN_CSUM_OUT_NODIF; 1456 *txop = BG_OP_IN_NODIF_OUT_CSUM; 1457 break; 1458 1459 case SCSI_PROT_READ_PASS: 1460 case SCSI_PROT_WRITE_PASS: 1461 *rxop = BG_OP_IN_CSUM_OUT_CRC; 1462 *txop = BG_OP_IN_CRC_OUT_CSUM; 1463 break; 1464 1465 case SCSI_PROT_NORMAL: 1466 default: 1467 break; 1468 1469 } 1470 } else { 1471 switch (scsi_get_prot_op(sc)) { 1472 case SCSI_PROT_READ_STRIP: 1473 case SCSI_PROT_WRITE_INSERT: 1474 *rxop = BG_OP_IN_CSUM_OUT_NODIF; 1475 *txop = BG_OP_IN_NODIF_OUT_CSUM; 1476 break; 1477 1478 case SCSI_PROT_READ_PASS: 1479 case SCSI_PROT_WRITE_PASS: 1480 *rxop = BG_OP_IN_CSUM_OUT_CSUM; 1481 *txop = BG_OP_IN_CSUM_OUT_CSUM; 1482 break; 1483 1484 case SCSI_PROT_READ_INSERT: 1485 case SCSI_PROT_WRITE_STRIP: 1486 *rxop = BG_OP_IN_NODIF_OUT_CSUM; 1487 *txop = BG_OP_IN_CSUM_OUT_NODIF; 1488 break; 1489 1490 case SCSI_PROT_NORMAL: 1491 default: 1492 break; 1493 } 1494 } 1495 1496 return 0; 1497 } 1498 #endif 1499 1500 /** 1501 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data 1502 * @phba: The Hba for which this call is being executed. 1503 * @sc: pointer to scsi command we're working on 1504 * @bpl: pointer to buffer list for protection groups 1505 * @datasegcnt: number of segments of data that have been dma mapped 1506 * 1507 * This function sets up BPL buffer list for protection groups of 1508 * type LPFC_PG_TYPE_NO_DIF 1509 * 1510 * This is usually used when the HBA is instructed to generate 1511 * DIFs and insert them into data stream (or strip DIF from 1512 * incoming data stream) 1513 * 1514 * The buffer list consists of just one protection group described 1515 * below: 1516 * +-------------------------+ 1517 * start of prot group --> | PDE_5 | 1518 * +-------------------------+ 1519 * | PDE_6 | 1520 * +-------------------------+ 1521 * | Data BDE | 1522 * +-------------------------+ 1523 * |more Data BDE's ... (opt)| 1524 * +-------------------------+ 1525 * 1526 * 1527 * Note: Data s/g buffers have been dma mapped 1528 * 1529 * Returns the number of BDEs added to the BPL. 
1530 **/ 1531 static int 1532 lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1533 struct ulp_bde64 *bpl, int datasegcnt) 1534 { 1535 struct scatterlist *sgde = NULL; /* s/g data entry */ 1536 struct lpfc_pde5 *pde5 = NULL; 1537 struct lpfc_pde6 *pde6 = NULL; 1538 dma_addr_t physaddr; 1539 int i = 0, num_bde = 0, status; 1540 int datadir = sc->sc_data_direction; 1541 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1542 uint32_t rc; 1543 #endif 1544 uint32_t checking = 1; 1545 uint32_t reftag; 1546 uint8_t txop, rxop; 1547 1548 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); 1549 if (status) 1550 goto out; 1551 1552 /* extract some info from the scsi command for pde*/ 1553 reftag = scsi_prot_ref_tag(sc); 1554 1555 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1556 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); 1557 if (rc) { 1558 if (rc & BG_ERR_SWAP) 1559 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); 1560 if (rc & BG_ERR_CHECK) 1561 checking = 0; 1562 } 1563 #endif 1564 1565 /* setup PDE5 with what we have */ 1566 pde5 = (struct lpfc_pde5 *) bpl; 1567 memset(pde5, 0, sizeof(struct lpfc_pde5)); 1568 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); 1569 1570 /* Endianness conversion if necessary for PDE5 */ 1571 pde5->word0 = cpu_to_le32(pde5->word0); 1572 pde5->reftag = cpu_to_le32(reftag); 1573 1574 /* advance bpl and increment bde count */ 1575 num_bde++; 1576 bpl++; 1577 pde6 = (struct lpfc_pde6 *) bpl; 1578 1579 /* setup PDE6 with the rest of the info */ 1580 memset(pde6, 0, sizeof(struct lpfc_pde6)); 1581 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); 1582 bf_set(pde6_optx, pde6, txop); 1583 bf_set(pde6_oprx, pde6, rxop); 1584 1585 /* 1586 * We only need to check the data on READs, for WRITEs 1587 * protection data is automatically generated, not checked. 1588 */ 1589 if (datadir == DMA_FROM_DEVICE) { 1590 if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) 1591 bf_set(pde6_ce, pde6, checking); 1592 else 1593 bf_set(pde6_ce, pde6, 0); 1594 1595 if (sc->prot_flags & SCSI_PROT_REF_CHECK) 1596 bf_set(pde6_re, pde6, checking); 1597 else 1598 bf_set(pde6_re, pde6, 0); 1599 } 1600 bf_set(pde6_ai, pde6, 1); 1601 bf_set(pde6_ae, pde6, 0); 1602 bf_set(pde6_apptagval, pde6, 0); 1603 1604 /* Endianness conversion if necessary for PDE6 */ 1605 pde6->word0 = cpu_to_le32(pde6->word0); 1606 pde6->word1 = cpu_to_le32(pde6->word1); 1607 pde6->word2 = cpu_to_le32(pde6->word2); 1608 1609 /* advance bpl and increment bde count */ 1610 num_bde++; 1611 bpl++; 1612 1613 /* assumption: caller has already run dma_map_sg on command data */ 1614 scsi_for_each_sg(sc, sgde, datasegcnt, i) { 1615 physaddr = sg_dma_address(sgde); 1616 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr)); 1617 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); 1618 bpl->tus.f.bdeSize = sg_dma_len(sgde); 1619 if (datadir == DMA_TO_DEVICE) 1620 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1621 else 1622 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; 1623 bpl->tus.w = le32_to_cpu(bpl->tus.w); 1624 bpl++; 1625 num_bde++; 1626 } 1627 1628 out: 1629 return num_bde; 1630 } 1631 1632 /** 1633 * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data 1634 * @phba: The Hba for which this call is being executed. 
1635 * @sc: pointer to scsi command we're working on 1636 * @bpl: pointer to buffer list for protection groups 1637 * @datacnt: number of segments of data that have been dma mapped 1638 * @protcnt: number of segment of protection data that have been dma mapped 1639 * 1640 * This function sets up BPL buffer list for protection groups of 1641 * type LPFC_PG_TYPE_DIF 1642 * 1643 * This is usually used when DIFs are in their own buffers, 1644 * separate from the data. The HBA can then by instructed 1645 * to place the DIFs in the outgoing stream. For read operations, 1646 * The HBA could extract the DIFs and place it in DIF buffers. 1647 * 1648 * The buffer list for this type consists of one or more of the 1649 * protection groups described below: 1650 * +-------------------------+ 1651 * start of first prot group --> | PDE_5 | 1652 * +-------------------------+ 1653 * | PDE_6 | 1654 * +-------------------------+ 1655 * | PDE_7 (Prot BDE) | 1656 * +-------------------------+ 1657 * | Data BDE | 1658 * +-------------------------+ 1659 * |more Data BDE's ... (opt)| 1660 * +-------------------------+ 1661 * start of new prot group --> | PDE_5 | 1662 * +-------------------------+ 1663 * | ... | 1664 * +-------------------------+ 1665 * 1666 * Note: It is assumed that both data and protection s/g buffers have been 1667 * mapped for DMA 1668 * 1669 * Returns the number of BDEs added to the BPL. 1670 **/ 1671 static int 1672 lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1673 struct ulp_bde64 *bpl, int datacnt, int protcnt) 1674 { 1675 struct scatterlist *sgde = NULL; /* s/g data entry */ 1676 struct scatterlist *sgpe = NULL; /* s/g prot entry */ 1677 struct lpfc_pde5 *pde5 = NULL; 1678 struct lpfc_pde6 *pde6 = NULL; 1679 struct lpfc_pde7 *pde7 = NULL; 1680 dma_addr_t dataphysaddr, protphysaddr; 1681 unsigned short curr_prot = 0; 1682 unsigned int split_offset; 1683 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder; 1684 unsigned int protgrp_blks, protgrp_bytes; 1685 unsigned int remainder, subtotal; 1686 int status; 1687 int datadir = sc->sc_data_direction; 1688 unsigned char pgdone = 0, alldone = 0; 1689 unsigned blksize; 1690 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1691 uint32_t rc; 1692 #endif 1693 uint32_t checking = 1; 1694 uint32_t reftag; 1695 uint8_t txop, rxop; 1696 int num_bde = 0; 1697 1698 sgpe = scsi_prot_sglist(sc); 1699 sgde = scsi_sglist(sc); 1700 1701 if (!sgpe || !sgde) { 1702 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1703 "9020 Invalid s/g entry: data=x%px prot=x%px\n", 1704 sgpe, sgde); 1705 return 0; 1706 } 1707 1708 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); 1709 if (status) 1710 goto out; 1711 1712 /* extract some info from the scsi command */ 1713 blksize = scsi_prot_interval(sc); 1714 reftag = scsi_prot_ref_tag(sc); 1715 1716 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1717 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); 1718 if (rc) { 1719 if (rc & BG_ERR_SWAP) 1720 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); 1721 if (rc & BG_ERR_CHECK) 1722 checking = 0; 1723 } 1724 #endif 1725 1726 split_offset = 0; 1727 do { 1728 /* Check to see if we ran out of space */ 1729 if (num_bde >= (phba->cfg_total_seg_cnt - 2)) 1730 return num_bde + 3; 1731 1732 /* setup PDE5 with what we have */ 1733 pde5 = (struct lpfc_pde5 *) bpl; 1734 memset(pde5, 0, sizeof(struct lpfc_pde5)); 1735 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); 1736 1737 /* Endianness conversion if necessary for PDE5 */ 1738 pde5->word0 = cpu_to_le32(pde5->word0); 1739 
pde5->reftag = cpu_to_le32(reftag); 1740 1741 /* advance bpl and increment bde count */ 1742 num_bde++; 1743 bpl++; 1744 pde6 = (struct lpfc_pde6 *) bpl; 1745 1746 /* setup PDE6 with the rest of the info */ 1747 memset(pde6, 0, sizeof(struct lpfc_pde6)); 1748 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); 1749 bf_set(pde6_optx, pde6, txop); 1750 bf_set(pde6_oprx, pde6, rxop); 1751 1752 if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) 1753 bf_set(pde6_ce, pde6, checking); 1754 else 1755 bf_set(pde6_ce, pde6, 0); 1756 1757 if (sc->prot_flags & SCSI_PROT_REF_CHECK) 1758 bf_set(pde6_re, pde6, checking); 1759 else 1760 bf_set(pde6_re, pde6, 0); 1761 1762 bf_set(pde6_ai, pde6, 1); 1763 bf_set(pde6_ae, pde6, 0); 1764 bf_set(pde6_apptagval, pde6, 0); 1765 1766 /* Endianness conversion if necessary for PDE6 */ 1767 pde6->word0 = cpu_to_le32(pde6->word0); 1768 pde6->word1 = cpu_to_le32(pde6->word1); 1769 pde6->word2 = cpu_to_le32(pde6->word2); 1770 1771 /* advance bpl and increment bde count */ 1772 num_bde++; 1773 bpl++; 1774 1775 /* setup the first BDE that points to protection buffer */ 1776 protphysaddr = sg_dma_address(sgpe) + protgroup_offset; 1777 protgroup_len = sg_dma_len(sgpe) - protgroup_offset; 1778 1779 /* must be integer multiple of the DIF block length */ 1780 BUG_ON(protgroup_len % 8); 1781 1782 pde7 = (struct lpfc_pde7 *) bpl; 1783 memset(pde7, 0, sizeof(struct lpfc_pde7)); 1784 bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR); 1785 1786 pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr)); 1787 pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr)); 1788 1789 protgrp_blks = protgroup_len / 8; 1790 protgrp_bytes = protgrp_blks * blksize; 1791 1792 /* check if this pde is crossing the 4K boundary; if so split */ 1793 if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) { 1794 protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff); 1795 protgroup_offset += protgroup_remainder; 1796 protgrp_blks = protgroup_remainder / 8; 1797 protgrp_bytes = protgrp_blks * blksize; 1798 } else { 1799 protgroup_offset = 0; 1800 curr_prot++; 1801 } 1802 1803 num_bde++; 1804 1805 /* setup BDE's for data blocks associated with DIF data */ 1806 pgdone = 0; 1807 subtotal = 0; /* total bytes processed for current prot grp */ 1808 while (!pgdone) { 1809 /* Check to see if we ran out of space */ 1810 if (num_bde >= phba->cfg_total_seg_cnt) 1811 return num_bde + 1; 1812 1813 if (!sgde) { 1814 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1815 "9065 BLKGRD:%s Invalid data segment\n", 1816 __func__); 1817 return 0; 1818 } 1819 bpl++; 1820 dataphysaddr = sg_dma_address(sgde) + split_offset; 1821 bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr)); 1822 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr)); 1823 1824 remainder = sg_dma_len(sgde) - split_offset; 1825 1826 if ((subtotal + remainder) <= protgrp_bytes) { 1827 /* we can use this whole buffer */ 1828 bpl->tus.f.bdeSize = remainder; 1829 split_offset = 0; 1830 1831 if ((subtotal + remainder) == protgrp_bytes) 1832 pgdone = 1; 1833 } else { 1834 /* must split this buffer with next prot grp */ 1835 bpl->tus.f.bdeSize = protgrp_bytes - subtotal; 1836 split_offset += bpl->tus.f.bdeSize; 1837 } 1838 1839 subtotal += bpl->tus.f.bdeSize; 1840 1841 if (datadir == DMA_TO_DEVICE) 1842 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1843 else 1844 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; 1845 bpl->tus.w = le32_to_cpu(bpl->tus.w); 1846 1847 num_bde++; 1848 1849 if (split_offset) 1850 break; 1851 1852 /* Move to the next s/g segment if possible */ 1853 sgde = 
sg_next(sgde); 1854 1855 } 1856 1857 if (protgroup_offset) { 1858 /* update the reference tag */ 1859 reftag += protgrp_blks; 1860 bpl++; 1861 continue; 1862 } 1863 1864 /* are we done ? */ 1865 if (curr_prot == protcnt) { 1866 alldone = 1; 1867 } else if (curr_prot < protcnt) { 1868 /* advance to next prot buffer */ 1869 sgpe = sg_next(sgpe); 1870 bpl++; 1871 1872 /* update the reference tag */ 1873 reftag += protgrp_blks; 1874 } else { 1875 /* if we're here, we have a bug */ 1876 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1877 "9054 BLKGRD: bug in %s\n", __func__); 1878 } 1879 1880 } while (!alldone); 1881 out: 1882 1883 return num_bde; 1884 } 1885 1886 /** 1887 * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data 1888 * @phba: The Hba for which this call is being executed. 1889 * @sc: pointer to scsi command we're working on 1890 * @sgl: pointer to buffer list for protection groups 1891 * @datasegcnt: number of segments of data that have been dma mapped 1892 * @lpfc_cmd: lpfc scsi command object pointer. 1893 * 1894 * This function sets up SGL buffer list for protection groups of 1895 * type LPFC_PG_TYPE_NO_DIF 1896 * 1897 * This is usually used when the HBA is instructed to generate 1898 * DIFs and insert them into data stream (or strip DIF from 1899 * incoming data stream) 1900 * 1901 * The buffer list consists of just one protection group described 1902 * below: 1903 * +-------------------------+ 1904 * start of prot group --> | DI_SEED | 1905 * +-------------------------+ 1906 * | Data SGE | 1907 * +-------------------------+ 1908 * |more Data SGE's ... (opt)| 1909 * +-------------------------+ 1910 * 1911 * 1912 * Note: Data s/g buffers have been dma mapped 1913 * 1914 * Returns the number of SGEs added to the SGL. 1915 **/ 1916 static uint32_t 1917 lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1918 struct sli4_sge *sgl, int datasegcnt, 1919 struct lpfc_io_buf *lpfc_cmd) 1920 { 1921 struct scatterlist *sgde = NULL; /* s/g data entry */ 1922 struct sli4_sge_diseed *diseed = NULL; 1923 dma_addr_t physaddr; 1924 int i = 0, status; 1925 uint32_t reftag, num_sge = 0; 1926 uint8_t txop, rxop; 1927 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1928 uint32_t rc; 1929 #endif 1930 uint32_t checking = 1; 1931 uint32_t dma_len; 1932 uint32_t dma_offset = 0; 1933 struct sli4_hybrid_sgl *sgl_xtra = NULL; 1934 int j; 1935 bool lsp_just_set = false; 1936 1937 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); 1938 if (status) 1939 goto out; 1940 1941 /* extract some info from the scsi command for pde*/ 1942 reftag = scsi_prot_ref_tag(sc); 1943 1944 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1945 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); 1946 if (rc) { 1947 if (rc & BG_ERR_SWAP) 1948 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); 1949 if (rc & BG_ERR_CHECK) 1950 checking = 0; 1951 } 1952 #endif 1953 1954 /* setup DISEED with what we have */ 1955 diseed = (struct sli4_sge_diseed *) sgl; 1956 memset(diseed, 0, sizeof(struct sli4_sge_diseed)); 1957 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED); 1958 1959 /* Endianness conversion if necessary */ 1960 diseed->ref_tag = cpu_to_le32(reftag); 1961 diseed->ref_tag_tran = diseed->ref_tag; 1962 1963 /* 1964 * We only need to check the data on READs, for WRITEs 1965 * protection data is automatically generated, not checked. 
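 * For example (illustrative of the NO_DIF cases handled here): with
 * SCSI_PROT_WRITE_INSERT the HBA generates the 8-byte DIF tuple for each
 * interval on the way out, so there is nothing to verify; with
 * SCSI_PROT_READ_STRIP the tags arriving from the wire are checked
 * (per the prot_flags below) before being stripped.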
1966 */ 1967 if (sc->sc_data_direction == DMA_FROM_DEVICE) { 1968 if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) 1969 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); 1970 else 1971 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); 1972 1973 if (sc->prot_flags & SCSI_PROT_REF_CHECK) 1974 bf_set(lpfc_sli4_sge_dif_re, diseed, checking); 1975 else 1976 bf_set(lpfc_sli4_sge_dif_re, diseed, 0); 1977 } 1978 1979 /* setup DISEED with the rest of the info */ 1980 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); 1981 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); 1982 1983 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); 1984 bf_set(lpfc_sli4_sge_dif_me, diseed, 0); 1985 1986 /* Endianness conversion if necessary for DISEED */ 1987 diseed->word2 = cpu_to_le32(diseed->word2); 1988 diseed->word3 = cpu_to_le32(diseed->word3); 1989 1990 /* advance bpl and increment sge count */ 1991 num_sge++; 1992 sgl++; 1993 1994 /* assumption: caller has already run dma_map_sg on command data */ 1995 sgde = scsi_sglist(sc); 1996 j = 3; 1997 for (i = 0; i < datasegcnt; i++) { 1998 /* clear it */ 1999 sgl->word2 = 0; 2000 2001 /* do we need to expand the segment */ 2002 if (!lsp_just_set && !((j + 1) % phba->border_sge_num) && 2003 ((datasegcnt - 1) != i)) { 2004 /* set LSP type */ 2005 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP); 2006 2007 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd); 2008 2009 if (unlikely(!sgl_xtra)) { 2010 lpfc_cmd->seg_cnt = 0; 2011 return 0; 2012 } 2013 sgl->addr_lo = cpu_to_le32(putPaddrLow( 2014 sgl_xtra->dma_phys_sgl)); 2015 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 2016 sgl_xtra->dma_phys_sgl)); 2017 2018 } else { 2019 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); 2020 } 2021 2022 if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) { 2023 if ((datasegcnt - 1) == i) 2024 bf_set(lpfc_sli4_sge_last, sgl, 1); 2025 physaddr = sg_dma_address(sgde); 2026 dma_len = sg_dma_len(sgde); 2027 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); 2028 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); 2029 2030 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 2031 sgl->word2 = cpu_to_le32(sgl->word2); 2032 sgl->sge_len = cpu_to_le32(dma_len); 2033 2034 dma_offset += dma_len; 2035 sgde = sg_next(sgde); 2036 2037 sgl++; 2038 num_sge++; 2039 lsp_just_set = false; 2040 2041 } else { 2042 sgl->word2 = cpu_to_le32(sgl->word2); 2043 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size); 2044 2045 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 2046 i = i - 1; 2047 2048 lsp_just_set = true; 2049 } 2050 2051 j++; 2052 2053 } 2054 2055 out: 2056 return num_sge; 2057 } 2058 2059 /** 2060 * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data 2061 * @phba: The Hba for which this call is being executed. 2062 * @sc: pointer to scsi command we're working on 2063 * @sgl: pointer to buffer list for protection groups 2064 * @datacnt: number of segments of data that have been dma mapped 2065 * @protcnt: number of segment of protection data that have been dma mapped 2066 * @lpfc_cmd: lpfc scsi command object pointer. 2067 * 2068 * This function sets up SGL buffer list for protection groups of 2069 * type LPFC_PG_TYPE_DIF 2070 * 2071 * This is usually used when DIFs are in their own buffers, 2072 * separate from the data. The HBA can then by instructed 2073 * to place the DIFs in the outgoing stream. For read operations, 2074 * The HBA could extract the DIFs and place it in DIF buffers. 
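 * (These are the LPFC_PG_TYPE_DIF_BUF operations - READ_INSERT,
 * WRITE_STRIP, READ_PASS and WRITE_PASS - as classified by
 * lpfc_prot_group_type() later in this file.)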
2075 * 2076 * The buffer list for this type consists of one or more of the 2077 * protection groups described below: 2078 * +-------------------------+ 2079 * start of first prot group --> | DISEED | 2080 * +-------------------------+ 2081 * | DIF (Prot SGE) | 2082 * +-------------------------+ 2083 * | Data SGE | 2084 * +-------------------------+ 2085 * |more Data SGE's ... (opt)| 2086 * +-------------------------+ 2087 * start of new prot group --> | DISEED | 2088 * +-------------------------+ 2089 * | ... | 2090 * +-------------------------+ 2091 * 2092 * Note: It is assumed that both data and protection s/g buffers have been 2093 * mapped for DMA 2094 * 2095 * Returns the number of SGEs added to the SGL. 2096 **/ 2097 static uint32_t 2098 lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, 2099 struct sli4_sge *sgl, int datacnt, int protcnt, 2100 struct lpfc_io_buf *lpfc_cmd) 2101 { 2102 struct scatterlist *sgde = NULL; /* s/g data entry */ 2103 struct scatterlist *sgpe = NULL; /* s/g prot entry */ 2104 struct sli4_sge_diseed *diseed = NULL; 2105 dma_addr_t dataphysaddr, protphysaddr; 2106 unsigned short curr_prot = 0; 2107 unsigned int split_offset; 2108 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder; 2109 unsigned int protgrp_blks, protgrp_bytes; 2110 unsigned int remainder, subtotal; 2111 int status; 2112 unsigned char pgdone = 0, alldone = 0; 2113 unsigned blksize; 2114 uint32_t reftag; 2115 uint8_t txop, rxop; 2116 uint32_t dma_len; 2117 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 2118 uint32_t rc; 2119 #endif 2120 uint32_t checking = 1; 2121 uint32_t dma_offset = 0, num_sge = 0; 2122 int j = 2; 2123 struct sli4_hybrid_sgl *sgl_xtra = NULL; 2124 2125 sgpe = scsi_prot_sglist(sc); 2126 sgde = scsi_sglist(sc); 2127 2128 if (!sgpe || !sgde) { 2129 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2130 "9082 Invalid s/g entry: data=x%px prot=x%px\n", 2131 sgpe, sgde); 2132 return 0; 2133 } 2134 2135 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); 2136 if (status) 2137 goto out; 2138 2139 /* extract some info from the scsi command */ 2140 blksize = scsi_prot_interval(sc); 2141 reftag = scsi_prot_ref_tag(sc); 2142 2143 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 2144 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); 2145 if (rc) { 2146 if (rc & BG_ERR_SWAP) 2147 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); 2148 if (rc & BG_ERR_CHECK) 2149 checking = 0; 2150 } 2151 #endif 2152 2153 split_offset = 0; 2154 do { 2155 /* Check to see if we ran out of space */ 2156 if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) && 2157 !(phba->cfg_xpsgl)) 2158 return num_sge + 3; 2159 2160 /* DISEED and DIF have to be together */ 2161 if (!((j + 1) % phba->border_sge_num) || 2162 !((j + 2) % phba->border_sge_num) || 2163 !((j + 3) % phba->border_sge_num)) { 2164 sgl->word2 = 0; 2165 2166 /* set LSP type */ 2167 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP); 2168 2169 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd); 2170 2171 if (unlikely(!sgl_xtra)) { 2172 goto out; 2173 } else { 2174 sgl->addr_lo = cpu_to_le32(putPaddrLow( 2175 sgl_xtra->dma_phys_sgl)); 2176 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 2177 sgl_xtra->dma_phys_sgl)); 2178 } 2179 2180 sgl->word2 = cpu_to_le32(sgl->word2); 2181 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size); 2182 2183 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 2184 j = 0; 2185 } 2186 2187 /* setup DISEED with what we have */ 2188 diseed = (struct sli4_sge_diseed *) sgl; 2189 memset(diseed, 0, sizeof(struct sli4_sge_diseed)); 2190 
bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED); 2191 2192 /* Endianness conversion if necessary */ 2193 diseed->ref_tag = cpu_to_le32(reftag); 2194 diseed->ref_tag_tran = diseed->ref_tag; 2195 2196 if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) { 2197 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); 2198 } else { 2199 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); 2200 /* 2201 * When in this mode, the hardware will replace 2202 * the guard tag from the host with a 2203 * newly generated good CRC for the wire. 2204 * Switch to raw mode here to avoid this 2205 * behavior. What the host sends gets put on the wire. 2206 */ 2207 if (txop == BG_OP_IN_CRC_OUT_CRC) { 2208 txop = BG_OP_RAW_MODE; 2209 rxop = BG_OP_RAW_MODE; 2210 } 2211 } 2212 2213 2214 if (sc->prot_flags & SCSI_PROT_REF_CHECK) 2215 bf_set(lpfc_sli4_sge_dif_re, diseed, checking); 2216 else 2217 bf_set(lpfc_sli4_sge_dif_re, diseed, 0); 2218 2219 /* setup DISEED with the rest of the info */ 2220 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); 2221 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); 2222 2223 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); 2224 bf_set(lpfc_sli4_sge_dif_me, diseed, 0); 2225 2226 /* Endianness conversion if necessary for DISEED */ 2227 diseed->word2 = cpu_to_le32(diseed->word2); 2228 diseed->word3 = cpu_to_le32(diseed->word3); 2229 2230 /* advance sgl and increment bde count */ 2231 num_sge++; 2232 2233 sgl++; 2234 j++; 2235 2236 /* setup the first BDE that points to protection buffer */ 2237 protphysaddr = sg_dma_address(sgpe) + protgroup_offset; 2238 protgroup_len = sg_dma_len(sgpe) - protgroup_offset; 2239 2240 /* must be integer multiple of the DIF block length */ 2241 BUG_ON(protgroup_len % 8); 2242 2243 /* Now setup DIF SGE */ 2244 sgl->word2 = 0; 2245 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF); 2246 sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr)); 2247 sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr)); 2248 sgl->word2 = cpu_to_le32(sgl->word2); 2249 sgl->sge_len = 0; 2250 2251 protgrp_blks = protgroup_len / 8; 2252 protgrp_bytes = protgrp_blks * blksize; 2253 2254 /* check if DIF SGE is crossing the 4K boundary; if so split */ 2255 if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) { 2256 protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff); 2257 protgroup_offset += protgroup_remainder; 2258 protgrp_blks = protgroup_remainder / 8; 2259 protgrp_bytes = protgrp_blks * blksize; 2260 } else { 2261 protgroup_offset = 0; 2262 curr_prot++; 2263 } 2264 2265 num_sge++; 2266 2267 /* setup SGE's for data blocks associated with DIF data */ 2268 pgdone = 0; 2269 subtotal = 0; /* total bytes processed for current prot grp */ 2270 2271 sgl++; 2272 j++; 2273 2274 while (!pgdone) { 2275 /* Check to see if we ran out of space */ 2276 if ((num_sge >= phba->cfg_total_seg_cnt) && 2277 !phba->cfg_xpsgl) 2278 return num_sge + 1; 2279 2280 if (!sgde) { 2281 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2282 "9086 BLKGRD:%s Invalid data segment\n", 2283 __func__); 2284 return 0; 2285 } 2286 2287 if (!((j + 1) % phba->border_sge_num)) { 2288 sgl->word2 = 0; 2289 2290 /* set LSP type */ 2291 bf_set(lpfc_sli4_sge_type, sgl, 2292 LPFC_SGE_TYPE_LSP); 2293 2294 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, 2295 lpfc_cmd); 2296 2297 if (unlikely(!sgl_xtra)) { 2298 goto out; 2299 } else { 2300 sgl->addr_lo = cpu_to_le32( 2301 putPaddrLow(sgl_xtra->dma_phys_sgl)); 2302 sgl->addr_hi = cpu_to_le32( 2303 putPaddrHigh(sgl_xtra->dma_phys_sgl)); 2304 } 2305 2306 sgl->word2 = cpu_to_le32(sgl->word2); 2307 sgl->sge_len = cpu_to_le32( 2308 
phba->cfg_sg_dma_buf_size); 2309 2310 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 2311 } else { 2312 dataphysaddr = sg_dma_address(sgde) + 2313 split_offset; 2314 2315 remainder = sg_dma_len(sgde) - split_offset; 2316 2317 if ((subtotal + remainder) <= protgrp_bytes) { 2318 /* we can use this whole buffer */ 2319 dma_len = remainder; 2320 split_offset = 0; 2321 2322 if ((subtotal + remainder) == 2323 protgrp_bytes) 2324 pgdone = 1; 2325 } else { 2326 /* must split this buffer with next 2327 * prot grp 2328 */ 2329 dma_len = protgrp_bytes - subtotal; 2330 split_offset += dma_len; 2331 } 2332 2333 subtotal += dma_len; 2334 2335 sgl->word2 = 0; 2336 sgl->addr_lo = cpu_to_le32(putPaddrLow( 2337 dataphysaddr)); 2338 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 2339 dataphysaddr)); 2340 bf_set(lpfc_sli4_sge_last, sgl, 0); 2341 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 2342 bf_set(lpfc_sli4_sge_type, sgl, 2343 LPFC_SGE_TYPE_DATA); 2344 2345 sgl->sge_len = cpu_to_le32(dma_len); 2346 dma_offset += dma_len; 2347 2348 num_sge++; 2349 2350 if (split_offset) { 2351 sgl++; 2352 j++; 2353 break; 2354 } 2355 2356 /* Move to the next s/g segment if possible */ 2357 sgde = sg_next(sgde); 2358 2359 sgl++; 2360 } 2361 2362 j++; 2363 } 2364 2365 if (protgroup_offset) { 2366 /* update the reference tag */ 2367 reftag += protgrp_blks; 2368 continue; 2369 } 2370 2371 /* are we done ? */ 2372 if (curr_prot == protcnt) { 2373 /* mark the last SGL */ 2374 sgl--; 2375 bf_set(lpfc_sli4_sge_last, sgl, 1); 2376 alldone = 1; 2377 } else if (curr_prot < protcnt) { 2378 /* advance to next prot buffer */ 2379 sgpe = sg_next(sgpe); 2380 2381 /* update the reference tag */ 2382 reftag += protgrp_blks; 2383 } else { 2384 /* if we're here, we have a bug */ 2385 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2386 "9085 BLKGRD: bug in %s\n", __func__); 2387 } 2388 2389 } while (!alldone); 2390 2391 out: 2392 2393 return num_sge; 2394 } 2395 2396 /** 2397 * lpfc_prot_group_type - Get prtotection group type of SCSI command 2398 * @phba: The Hba for which this call is being executed. 2399 * @sc: pointer to scsi command we're working on 2400 * 2401 * Given a SCSI command that supports DIF, determine composition of protection 2402 * groups involved in setting up buffer lists 2403 * 2404 * Returns: Protection group type (with or without DIF) 2405 * 2406 **/ 2407 static int 2408 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc) 2409 { 2410 int ret = LPFC_PG_TYPE_INVALID; 2411 unsigned char op = scsi_get_prot_op(sc); 2412 2413 switch (op) { 2414 case SCSI_PROT_READ_STRIP: 2415 case SCSI_PROT_WRITE_INSERT: 2416 ret = LPFC_PG_TYPE_NO_DIF; 2417 break; 2418 case SCSI_PROT_READ_INSERT: 2419 case SCSI_PROT_WRITE_STRIP: 2420 case SCSI_PROT_READ_PASS: 2421 case SCSI_PROT_WRITE_PASS: 2422 ret = LPFC_PG_TYPE_DIF_BUF; 2423 break; 2424 default: 2425 if (phba) 2426 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2427 "9021 Unsupported protection op:%d\n", 2428 op); 2429 break; 2430 } 2431 return ret; 2432 } 2433 2434 /** 2435 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard 2436 * @phba: The Hba for which this call is being executed. 2437 * @lpfc_cmd: The scsi buffer which is going to be adjusted. 2438 * 2439 * Adjust the data length to account for how much data 2440 * is actually on the wire. 
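 *
 * A worked example with illustrative numbers:
 *   scsi_bufflen() = 32768, scsi_prot_interval() = 512
 *   fcpdl += (32768 / 512) * 8;    64 blocks * 8 bytes of DIF
 *   adjusted length = 33280 bytes on the wire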
 *
 * returns the adjusted data length
 **/
static int
lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
		       struct lpfc_io_buf *lpfc_cmd)
{
	struct scsi_cmnd *sc = lpfc_cmd->pCmd;
	int fcpdl;

	fcpdl = scsi_bufflen(sc);

	/* Check if there is protection data on the wire */
	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		/* Read check for protection data */
		if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
			return fcpdl;

	} else {
		/* Write check for protection data */
		if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
			return fcpdl;
	}

	/*
	 * If we are in DIF Type 1 mode every data block has an 8-byte
	 * DIF (trailer) attached to it. Must adjust the FCP data length
	 * to account for the protection data.
	 */
	fcpdl += (fcpdl / scsi_prot_interval(sc)) * 8;

	return fcpdl;
}

/**
 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
 *
 * This is the protection/DIF aware version of
 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
 * two functions eventually, but for now, it's here.
 * RETURNS 0 - SUCCESS,
 *         1 - Failed DMA map, retry.
 *         2 - Invalid scsi cmd or prot-type. Do not retry.
 **/
static int
lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
			     struct lpfc_io_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	uint32_t num_bde = 0;
	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
	int prot_group_type = 0;
	int fcpdl;
	int ret = 1;
	struct lpfc_vport *vport = phba->pport;

	/*
	 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
	 * fcp_rsp regions to the first data bde entry
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from dma_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages. They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
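		 * For instance, an IOMMU may coalesce two physically
		 * discontiguous pages into a single DMA segment, so
		 * dma_map_sg() can legitimately return fewer segments than
		 * scsi_sg_count() requested.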
2513 */ 2514 datasegcnt = dma_map_sg(&phba->pcidev->dev, 2515 scsi_sglist(scsi_cmnd), 2516 scsi_sg_count(scsi_cmnd), datadir); 2517 if (unlikely(!datasegcnt)) 2518 return 1; 2519 2520 lpfc_cmd->seg_cnt = datasegcnt; 2521 2522 /* First check if data segment count from SCSI Layer is good */ 2523 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 2524 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); 2525 ret = 2; 2526 goto err; 2527 } 2528 2529 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); 2530 2531 switch (prot_group_type) { 2532 case LPFC_PG_TYPE_NO_DIF: 2533 2534 /* Here we need to add a PDE5 and PDE6 to the count */ 2535 if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) { 2536 ret = 2; 2537 goto err; 2538 } 2539 2540 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl, 2541 datasegcnt); 2542 /* we should have 2 or more entries in buffer list */ 2543 if (num_bde < 2) { 2544 ret = 2; 2545 goto err; 2546 } 2547 break; 2548 2549 case LPFC_PG_TYPE_DIF_BUF: 2550 /* 2551 * This type indicates that protection buffers are 2552 * passed to the driver, so that needs to be prepared 2553 * for DMA 2554 */ 2555 protsegcnt = dma_map_sg(&phba->pcidev->dev, 2556 scsi_prot_sglist(scsi_cmnd), 2557 scsi_prot_sg_count(scsi_cmnd), datadir); 2558 if (unlikely(!protsegcnt)) { 2559 scsi_dma_unmap(scsi_cmnd); 2560 return 1; 2561 } 2562 2563 lpfc_cmd->prot_seg_cnt = protsegcnt; 2564 2565 /* 2566 * There is a minimun of 4 BPLs used for every 2567 * protection data segment. 2568 */ 2569 if ((lpfc_cmd->prot_seg_cnt * 4) > 2570 (phba->cfg_total_seg_cnt - 2)) { 2571 ret = 2; 2572 goto err; 2573 } 2574 2575 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl, 2576 datasegcnt, protsegcnt); 2577 /* we should have 3 or more entries in buffer list */ 2578 if ((num_bde < 3) || 2579 (num_bde > phba->cfg_total_seg_cnt)) { 2580 ret = 2; 2581 goto err; 2582 } 2583 break; 2584 2585 case LPFC_PG_TYPE_INVALID: 2586 default: 2587 scsi_dma_unmap(scsi_cmnd); 2588 lpfc_cmd->seg_cnt = 0; 2589 2590 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2591 "9022 Unexpected protection group %i\n", 2592 prot_group_type); 2593 return 2; 2594 } 2595 } 2596 2597 /* 2598 * Finish initializing those IOCB fields that are dependent on the 2599 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly 2600 * reinitialized since all iocb memory resources are used many times 2601 * for transmit, receive, and continuation bpl's. 
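	 * For example, with num_bde == 5 the BPL covers the FCP_CMND and
	 * FCP_RSP BDEs plus the five data/protection BDEs, i.e. the bdeSize
	 * computed below works out to 7 * sizeof(struct ulp_bde64).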
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;

	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;

	/*
	 * For First burst, we may need to adjust the initial transfer
	 * length for DIF
	 */
	if (iocb_cmd->un.fcpi.fcpi_XRdy &&
	    (fcpdl < vport->cfg_first_burst_size))
		iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;

	return 0;
err:
	if (lpfc_cmd->seg_cnt)
		scsi_dma_unmap(scsi_cmnd);
	if (lpfc_cmd->prot_seg_cnt)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     scsi_cmnd->sc_data_direction);

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"9023 Cannot setup S/G List for HBA "
			"IO segs %d/%d BPL %d SCSI %d: %d %d\n",
			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
			prot_group_type, num_bde);

	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->prot_seg_cnt = 0;
	return ret;
}

/*
 * This function calculates the T10 DIF guard tag
 * on the specified data using a CRC algorithm
 * (crc_t10dif).
 */
static uint16_t
lpfc_bg_crc(uint8_t *data, int count)
{
	uint16_t crc = 0;
	uint16_t x;

	crc = crc_t10dif(data, count);
	x = cpu_to_be16(crc);
	return x;
}

/*
 * This function calculates the T10 DIF guard tag
 * on the specified data using a CSUM algorithm
 * (ip_compute_csum).
 */
static uint16_t
lpfc_bg_csum(uint8_t *data, int count)
{
	uint16_t ret;

	ret = ip_compute_csum(data, count);
	return ret;
}

/*
 * This function examines the protection data to try to determine
 * what type of T10-DIF error occurred.
 */
static void
lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	struct scatterlist *sgpe; /* s/g prot entry */
	struct scatterlist *sgde; /* s/g data entry */
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct scsi_dif_tuple *src = NULL;
	uint8_t *data_src = NULL;
	uint16_t guard_tag;
	uint16_t start_app_tag, app_tag;
	uint32_t start_ref_tag, ref_tag;
	int prot, protsegcnt;
	int err_type, len, data_len;
	int chk_ref, chk_app, chk_guard;
	uint16_t sum;
	unsigned blksize;

	err_type = BGS_GUARD_ERR_MASK;
	sum = 0;
	guard_tag = 0;

	/* First check to see if there is protection data to examine */
	prot = scsi_get_prot_op(cmd);
	if ((prot == SCSI_PROT_READ_STRIP) ||
	    (prot == SCSI_PROT_WRITE_INSERT) ||
	    (prot == SCSI_PROT_NORMAL))
		goto out;

	/* Currently the driver just supports ref_tag and guard_tag checking */
	chk_ref = 1;
	chk_app = 0;
	chk_guard = 0;

	/* Setup a ptr to the protection data provided by the SCSI host */
	sgpe = scsi_prot_sglist(cmd);
	protsegcnt = lpfc_cmd->prot_seg_cnt;

	if (sgpe && protsegcnt) {

		/*
		 * We will only try to verify guard tag if the segment
		 * data length is a multiple of the blksize.
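		 * For example, with a 512-byte interval a 4096-byte data
		 * segment spans exactly 8 intervals and the guard tags can be
		 * recomputed; a 3000-byte segment is not interval aligned, so
		 * guard checking is skipped for it.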
2722 */ 2723 sgde = scsi_sglist(cmd); 2724 blksize = scsi_prot_interval(cmd); 2725 data_src = (uint8_t *)sg_virt(sgde); 2726 data_len = sgde->length; 2727 if ((data_len & (blksize - 1)) == 0) 2728 chk_guard = 1; 2729 2730 src = (struct scsi_dif_tuple *)sg_virt(sgpe); 2731 start_ref_tag = scsi_prot_ref_tag(cmd); 2732 start_app_tag = src->app_tag; 2733 len = sgpe->length; 2734 while (src && protsegcnt) { 2735 while (len) { 2736 2737 /* 2738 * First check to see if a protection data 2739 * check is valid 2740 */ 2741 if ((src->ref_tag == T10_PI_REF_ESCAPE) || 2742 (src->app_tag == T10_PI_APP_ESCAPE)) { 2743 start_ref_tag++; 2744 goto skipit; 2745 } 2746 2747 /* First Guard Tag checking */ 2748 if (chk_guard) { 2749 guard_tag = src->guard_tag; 2750 if (cmd->prot_flags 2751 & SCSI_PROT_IP_CHECKSUM) 2752 sum = lpfc_bg_csum(data_src, 2753 blksize); 2754 else 2755 sum = lpfc_bg_crc(data_src, 2756 blksize); 2757 if ((guard_tag != sum)) { 2758 err_type = BGS_GUARD_ERR_MASK; 2759 goto out; 2760 } 2761 } 2762 2763 /* Reference Tag checking */ 2764 ref_tag = be32_to_cpu(src->ref_tag); 2765 if (chk_ref && (ref_tag != start_ref_tag)) { 2766 err_type = BGS_REFTAG_ERR_MASK; 2767 goto out; 2768 } 2769 start_ref_tag++; 2770 2771 /* App Tag checking */ 2772 app_tag = src->app_tag; 2773 if (chk_app && (app_tag != start_app_tag)) { 2774 err_type = BGS_APPTAG_ERR_MASK; 2775 goto out; 2776 } 2777 skipit: 2778 len -= sizeof(struct scsi_dif_tuple); 2779 if (len < 0) 2780 len = 0; 2781 src++; 2782 2783 data_src += blksize; 2784 data_len -= blksize; 2785 2786 /* 2787 * Are we at the end of the Data segment? 2788 * The data segment is only used for Guard 2789 * tag checking. 2790 */ 2791 if (chk_guard && (data_len == 0)) { 2792 chk_guard = 0; 2793 sgde = sg_next(sgde); 2794 if (!sgde) 2795 goto out; 2796 2797 data_src = (uint8_t *)sg_virt(sgde); 2798 data_len = sgde->length; 2799 if ((data_len & (blksize - 1)) == 0) 2800 chk_guard = 1; 2801 } 2802 } 2803 2804 /* Goto the next Protection data segment */ 2805 sgpe = sg_next(sgpe); 2806 if (sgpe) { 2807 src = (struct scsi_dif_tuple *)sg_virt(sgpe); 2808 len = sgpe->length; 2809 } else { 2810 src = NULL; 2811 } 2812 protsegcnt--; 2813 } 2814 } 2815 out: 2816 if (err_type == BGS_GUARD_ERR_MASK) { 2817 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1); 2818 set_host_byte(cmd, DID_ABORT); 2819 phba->bg_guard_err_cnt++; 2820 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2821 "9069 BLKGRD: reftag %x grd_tag err %x != %x\n", 2822 scsi_prot_ref_tag(cmd), 2823 sum, guard_tag); 2824 2825 } else if (err_type == BGS_REFTAG_ERR_MASK) { 2826 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3); 2827 set_host_byte(cmd, DID_ABORT); 2828 2829 phba->bg_reftag_err_cnt++; 2830 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2831 "9066 BLKGRD: reftag %x ref_tag err %x != %x\n", 2832 scsi_prot_ref_tag(cmd), 2833 ref_tag, start_ref_tag); 2834 2835 } else if (err_type == BGS_APPTAG_ERR_MASK) { 2836 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2); 2837 set_host_byte(cmd, DID_ABORT); 2838 2839 phba->bg_apptag_err_cnt++; 2840 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2841 "9041 BLKGRD: reftag %x app_tag err %x != %x\n", 2842 scsi_prot_ref_tag(cmd), 2843 app_tag, start_app_tag); 2844 } 2845 } 2846 2847 /* 2848 * This function checks for BlockGuard errors detected by 2849 * the HBA. 
In case of errors, the ASC/ASCQ fields in the 2850 * sense buffer will be set accordingly, paired with 2851 * ILLEGAL_REQUEST to signal to the kernel that the HBA 2852 * detected corruption. 2853 * 2854 * Returns: 2855 * 0 - No error found 2856 * 1 - BlockGuard error found 2857 * -1 - Internal error (bad profile, ...etc) 2858 */ 2859 static int 2860 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, 2861 struct lpfc_iocbq *pIocbOut) 2862 { 2863 struct scsi_cmnd *cmd = lpfc_cmd->pCmd; 2864 struct sli3_bg_fields *bgf; 2865 int ret = 0; 2866 struct lpfc_wcqe_complete *wcqe; 2867 u32 status; 2868 u32 bghm = 0; 2869 u32 bgstat = 0; 2870 u64 failing_sector = 0; 2871 2872 if (phba->sli_rev == LPFC_SLI_REV4) { 2873 wcqe = &pIocbOut->wcqe_cmpl; 2874 status = bf_get(lpfc_wcqe_c_status, wcqe); 2875 2876 if (status == CQE_STATUS_DI_ERROR) { 2877 /* Guard Check failed */ 2878 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) 2879 bgstat |= BGS_GUARD_ERR_MASK; 2880 2881 /* AppTag Check failed */ 2882 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) 2883 bgstat |= BGS_APPTAG_ERR_MASK; 2884 2885 /* RefTag Check failed */ 2886 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) 2887 bgstat |= BGS_REFTAG_ERR_MASK; 2888 2889 /* Check to see if there was any good data before the 2890 * error 2891 */ 2892 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) { 2893 bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK; 2894 bghm = wcqe->total_data_placed; 2895 } 2896 2897 /* 2898 * Set ALL the error bits to indicate we don't know what 2899 * type of error it is. 2900 */ 2901 if (!bgstat) 2902 bgstat |= (BGS_REFTAG_ERR_MASK | 2903 BGS_APPTAG_ERR_MASK | 2904 BGS_GUARD_ERR_MASK); 2905 } 2906 2907 } else { 2908 bgf = &pIocbOut->iocb.unsli3.sli3_bg; 2909 bghm = bgf->bghm; 2910 bgstat = bgf->bgstat; 2911 } 2912 2913 if (lpfc_bgs_get_invalid_prof(bgstat)) { 2914 cmd->result = DID_ERROR << 16; 2915 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2916 "9072 BLKGRD: Invalid BG Profile in cmd " 2917 "0x%x reftag 0x%x blk cnt 0x%x " 2918 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 2919 scsi_prot_ref_tag(cmd), 2920 scsi_logical_block_count(cmd), bgstat, bghm); 2921 ret = (-1); 2922 goto out; 2923 } 2924 2925 if (lpfc_bgs_get_uninit_dif_block(bgstat)) { 2926 cmd->result = DID_ERROR << 16; 2927 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2928 "9073 BLKGRD: Invalid BG PDIF Block in cmd " 2929 "0x%x reftag 0x%x blk cnt 0x%x " 2930 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 2931 scsi_prot_ref_tag(cmd), 2932 scsi_logical_block_count(cmd), bgstat, bghm); 2933 ret = (-1); 2934 goto out; 2935 } 2936 2937 if (lpfc_bgs_get_guard_err(bgstat)) { 2938 ret = 1; 2939 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1); 2940 set_host_byte(cmd, DID_ABORT); 2941 phba->bg_guard_err_cnt++; 2942 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2943 "9055 BLKGRD: Guard Tag error in cmd " 2944 "0x%x reftag 0x%x blk cnt 0x%x " 2945 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 2946 scsi_prot_ref_tag(cmd), 2947 scsi_logical_block_count(cmd), bgstat, bghm); 2948 } 2949 2950 if (lpfc_bgs_get_reftag_err(bgstat)) { 2951 ret = 1; 2952 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3); 2953 set_host_byte(cmd, DID_ABORT); 2954 phba->bg_reftag_err_cnt++; 2955 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2956 "9056 BLKGRD: Ref Tag error in cmd " 2957 "0x%x reftag 0x%x blk cnt 0x%x " 2958 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 2959 scsi_prot_ref_tag(cmd), 2960 scsi_logical_block_count(cmd), bgstat, bghm); 2961 } 2962 2963 if (lpfc_bgs_get_apptag_err(bgstat)) { 2964 ret = 1; 2965 
scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2); 2966 set_host_byte(cmd, DID_ABORT); 2967 phba->bg_apptag_err_cnt++; 2968 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2969 "9061 BLKGRD: App Tag error in cmd " 2970 "0x%x reftag 0x%x blk cnt 0x%x " 2971 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 2972 scsi_prot_ref_tag(cmd), 2973 scsi_logical_block_count(cmd), bgstat, bghm); 2974 } 2975 2976 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { 2977 /* 2978 * setup sense data descriptor 0 per SPC-4 as an information 2979 * field, and put the failing LBA in it. 2980 * This code assumes there was also a guard/app/ref tag error 2981 * indication. 2982 */ 2983 cmd->sense_buffer[7] = 0xc; /* Additional sense length */ 2984 cmd->sense_buffer[8] = 0; /* Information descriptor type */ 2985 cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */ 2986 cmd->sense_buffer[10] = 0x80; /* Validity bit */ 2987 2988 /* bghm is a "on the wire" FC frame based count */ 2989 switch (scsi_get_prot_op(cmd)) { 2990 case SCSI_PROT_READ_INSERT: 2991 case SCSI_PROT_WRITE_STRIP: 2992 bghm /= cmd->device->sector_size; 2993 break; 2994 case SCSI_PROT_READ_STRIP: 2995 case SCSI_PROT_WRITE_INSERT: 2996 case SCSI_PROT_READ_PASS: 2997 case SCSI_PROT_WRITE_PASS: 2998 bghm /= (cmd->device->sector_size + 2999 sizeof(struct scsi_dif_tuple)); 3000 break; 3001 } 3002 3003 failing_sector = scsi_get_lba(cmd); 3004 failing_sector += bghm; 3005 3006 /* Descriptor Information */ 3007 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]); 3008 } 3009 3010 if (!ret) { 3011 /* No error was reported - problem in FW? */ 3012 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3013 "9057 BLKGRD: Unknown error in cmd " 3014 "0x%x reftag 0x%x blk cnt 0x%x " 3015 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3016 scsi_prot_ref_tag(cmd), 3017 scsi_logical_block_count(cmd), bgstat, bghm); 3018 3019 /* Calculate what type of error it was */ 3020 lpfc_calc_bg_err(phba, lpfc_cmd); 3021 } 3022 out: 3023 return ret; 3024 } 3025 3026 /** 3027 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec 3028 * @phba: The Hba for which this call is being executed. 3029 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3030 * 3031 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd 3032 * field of @lpfc_cmd for device with SLI-4 interface spec. 3033 * 3034 * Return codes: 3035 * 2 - Error - Do not retry 3036 * 1 - Error - Retry 3037 * 0 - Success 3038 **/ 3039 static int 3040 lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) 3041 { 3042 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 3043 struct scatterlist *sgel = NULL; 3044 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 3045 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; 3046 struct sli4_sge *first_data_sgl; 3047 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; 3048 struct lpfc_vport *vport = phba->pport; 3049 union lpfc_wqe128 *wqe = &pwqeq->wqe; 3050 dma_addr_t physaddr; 3051 uint32_t dma_len; 3052 uint32_t dma_offset = 0; 3053 int nseg, i, j; 3054 struct ulp_bde64 *bde; 3055 bool lsp_just_set = false; 3056 struct sli4_hybrid_sgl *sgl_xtra = NULL; 3057 3058 /* 3059 * There are three possibilities here - use scatter-gather segment, use 3060 * the single mapping, or neither. Start the lpfc command prep by 3061 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first 3062 * data bde entry. 
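 * A sketch of the resulting SGL layout for the common case (no LSP
 * expansion): sgl[0] maps the FCP_CMND, sgl[1] maps the FCP_RSP (its
 * last-bit is cleared below), and sgl[2..] carry the data SGEs, with the
 * final data SGE marked last.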
3063 */ 3064 if (scsi_sg_count(scsi_cmnd)) { 3065 /* 3066 * The driver stores the segment count returned from dma_map_sg 3067 * because this a count of dma-mappings used to map the use_sg 3068 * pages. They are not guaranteed to be the same for those 3069 * architectures that implement an IOMMU. 3070 */ 3071 3072 nseg = scsi_dma_map(scsi_cmnd); 3073 if (unlikely(nseg <= 0)) 3074 return 1; 3075 sgl += 1; 3076 /* clear the last flag in the fcp_rsp map entry */ 3077 sgl->word2 = le32_to_cpu(sgl->word2); 3078 bf_set(lpfc_sli4_sge_last, sgl, 0); 3079 sgl->word2 = cpu_to_le32(sgl->word2); 3080 sgl += 1; 3081 first_data_sgl = sgl; 3082 lpfc_cmd->seg_cnt = nseg; 3083 if (!phba->cfg_xpsgl && 3084 lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 3085 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3086 "9074 BLKGRD:" 3087 " %s: Too many sg segments from " 3088 "dma_map_sg. Config %d, seg_cnt %d\n", 3089 __func__, phba->cfg_sg_seg_cnt, 3090 lpfc_cmd->seg_cnt); 3091 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); 3092 lpfc_cmd->seg_cnt = 0; 3093 scsi_dma_unmap(scsi_cmnd); 3094 return 2; 3095 } 3096 3097 /* 3098 * The driver established a maximum scatter-gather segment count 3099 * during probe that limits the number of sg elements in any 3100 * single scsi command. Just run through the seg_cnt and format 3101 * the sge's. 3102 * When using SLI-3 the driver will try to fit all the BDEs into 3103 * the IOCB. If it can't then the BDEs get added to a BPL as it 3104 * does for SLI-2 mode. 3105 */ 3106 3107 /* for tracking segment boundaries */ 3108 sgel = scsi_sglist(scsi_cmnd); 3109 j = 2; 3110 for (i = 0; i < nseg; i++) { 3111 sgl->word2 = 0; 3112 if (nseg == 1) { 3113 bf_set(lpfc_sli4_sge_last, sgl, 1); 3114 bf_set(lpfc_sli4_sge_type, sgl, 3115 LPFC_SGE_TYPE_DATA); 3116 } else { 3117 bf_set(lpfc_sli4_sge_last, sgl, 0); 3118 3119 /* do we need to expand the segment */ 3120 if (!lsp_just_set && 3121 !((j + 1) % phba->border_sge_num) && 3122 ((nseg - 1) != i)) { 3123 /* set LSP type */ 3124 bf_set(lpfc_sli4_sge_type, sgl, 3125 LPFC_SGE_TYPE_LSP); 3126 3127 sgl_xtra = lpfc_get_sgl_per_hdwq( 3128 phba, lpfc_cmd); 3129 3130 if (unlikely(!sgl_xtra)) { 3131 lpfc_cmd->seg_cnt = 0; 3132 scsi_dma_unmap(scsi_cmnd); 3133 return 1; 3134 } 3135 sgl->addr_lo = cpu_to_le32(putPaddrLow( 3136 sgl_xtra->dma_phys_sgl)); 3137 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 3138 sgl_xtra->dma_phys_sgl)); 3139 3140 } else { 3141 bf_set(lpfc_sli4_sge_type, sgl, 3142 LPFC_SGE_TYPE_DATA); 3143 } 3144 } 3145 3146 if (!(bf_get(lpfc_sli4_sge_type, sgl) & 3147 LPFC_SGE_TYPE_LSP)) { 3148 if ((nseg - 1) == i) 3149 bf_set(lpfc_sli4_sge_last, sgl, 1); 3150 3151 physaddr = sg_dma_address(sgel); 3152 dma_len = sg_dma_len(sgel); 3153 sgl->addr_lo = cpu_to_le32(putPaddrLow( 3154 physaddr)); 3155 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 3156 physaddr)); 3157 3158 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 3159 sgl->word2 = cpu_to_le32(sgl->word2); 3160 sgl->sge_len = cpu_to_le32(dma_len); 3161 3162 dma_offset += dma_len; 3163 sgel = sg_next(sgel); 3164 3165 sgl++; 3166 lsp_just_set = false; 3167 3168 } else { 3169 sgl->word2 = cpu_to_le32(sgl->word2); 3170 sgl->sge_len = cpu_to_le32( 3171 phba->cfg_sg_dma_buf_size); 3172 3173 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 3174 i = i - 1; 3175 3176 lsp_just_set = true; 3177 } 3178 3179 j++; 3180 } 3181 3182 /* PBDE support for first data SGE only. 3183 * For FCoE, we key off Performance Hints. 3184 * For FC, we key off lpfc_enable_pbde. 
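		 * When it applies (the nseg == 1 test below), the lone data
		 * SGE is mirrored into words 13-15 of the WQE as a BDE and
		 * the PBDE bit is set in word 11, so the adapter can start
		 * the transfer without a separate SGL fetch.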
3185 */ 3186 if (nseg == 1 && 3187 ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) || 3188 phba->cfg_enable_pbde)) { 3189 /* Words 13-15 */ 3190 bde = (struct ulp_bde64 *) 3191 &wqe->words[13]; 3192 bde->addrLow = first_data_sgl->addr_lo; 3193 bde->addrHigh = first_data_sgl->addr_hi; 3194 bde->tus.f.bdeSize = 3195 le32_to_cpu(first_data_sgl->sge_len); 3196 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 3197 bde->tus.w = cpu_to_le32(bde->tus.w); 3198 3199 /* Word 11 - set PBDE bit */ 3200 bf_set(wqe_pbde, &wqe->generic.wqe_com, 1); 3201 } else { 3202 memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3)); 3203 /* Word 11 - PBDE bit disabled by default template */ 3204 } 3205 } else { 3206 sgl += 1; 3207 /* set the last flag in the fcp_rsp map entry */ 3208 sgl->word2 = le32_to_cpu(sgl->word2); 3209 bf_set(lpfc_sli4_sge_last, sgl, 1); 3210 sgl->word2 = cpu_to_le32(sgl->word2); 3211 3212 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) || 3213 phba->cfg_enable_pbde) { 3214 bde = (struct ulp_bde64 *) 3215 &wqe->words[13]; 3216 memset(bde, 0, (sizeof(uint32_t) * 3)); 3217 } 3218 } 3219 3220 /* 3221 * Finish initializing those IOCB fields that are dependent on the 3222 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is 3223 * explicitly reinitialized. 3224 * all iocb memory resources are reused. 3225 */ 3226 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); 3227 /* Set first-burst provided it was successfully negotiated */ 3228 if (!(phba->hba_flag & HBA_FCOE_MODE) && 3229 vport->cfg_first_burst_size && 3230 scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) { 3231 u32 init_len, total_len; 3232 3233 total_len = be32_to_cpu(fcp_cmnd->fcpDl); 3234 init_len = min(total_len, vport->cfg_first_burst_size); 3235 3236 /* Word 4 & 5 */ 3237 wqe->fcp_iwrite.initial_xfer_len = init_len; 3238 wqe->fcp_iwrite.total_xfer_len = total_len; 3239 } else { 3240 /* Word 4 */ 3241 wqe->fcp_iwrite.total_xfer_len = 3242 be32_to_cpu(fcp_cmnd->fcpDl); 3243 } 3244 3245 /* 3246 * If the OAS driver feature is enabled and the lun is enabled for 3247 * OAS, set the oas iocb related flags. 3248 */ 3249 if ((phba->cfg_fof) && ((struct lpfc_device_data *) 3250 scsi_cmnd->device->hostdata)->oas_enabled) { 3251 lpfc_cmd->cur_iocbq.cmd_flag |= (LPFC_IO_OAS | LPFC_IO_FOF); 3252 lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *) 3253 scsi_cmnd->device->hostdata)->priority; 3254 3255 /* Word 10 */ 3256 bf_set(wqe_oas, &wqe->generic.wqe_com, 1); 3257 bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1); 3258 3259 if (lpfc_cmd->cur_iocbq.priority) 3260 bf_set(wqe_ccp, &wqe->generic.wqe_com, 3261 (lpfc_cmd->cur_iocbq.priority << 1)); 3262 else 3263 bf_set(wqe_ccp, &wqe->generic.wqe_com, 3264 (phba->cfg_XLanePriority << 1)); 3265 } 3266 3267 return 0; 3268 } 3269 3270 /** 3271 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec 3272 * @phba: The Hba for which this call is being executed. 3273 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3274 * 3275 * This is the protection/DIF aware version of 3276 * lpfc_scsi_prep_dma_buf(). 
It may be a good idea to combine the 3277 * two functions eventually, but for now, it's here 3278 * Return codes: 3279 * 2 - Error - Do not retry 3280 * 1 - Error - Retry 3281 * 0 - Success 3282 **/ 3283 static int 3284 lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, 3285 struct lpfc_io_buf *lpfc_cmd) 3286 { 3287 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 3288 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 3289 struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl); 3290 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; 3291 union lpfc_wqe128 *wqe = &pwqeq->wqe; 3292 uint32_t num_sge = 0; 3293 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; 3294 int prot_group_type = 0; 3295 int fcpdl; 3296 int ret = 1; 3297 struct lpfc_vport *vport = phba->pport; 3298 3299 /* 3300 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd 3301 * fcp_rsp regions to the first data sge entry 3302 */ 3303 if (scsi_sg_count(scsi_cmnd)) { 3304 /* 3305 * The driver stores the segment count returned from dma_map_sg 3306 * because this a count of dma-mappings used to map the use_sg 3307 * pages. They are not guaranteed to be the same for those 3308 * architectures that implement an IOMMU. 3309 */ 3310 datasegcnt = dma_map_sg(&phba->pcidev->dev, 3311 scsi_sglist(scsi_cmnd), 3312 scsi_sg_count(scsi_cmnd), datadir); 3313 if (unlikely(!datasegcnt)) 3314 return 1; 3315 3316 sgl += 1; 3317 /* clear the last flag in the fcp_rsp map entry */ 3318 sgl->word2 = le32_to_cpu(sgl->word2); 3319 bf_set(lpfc_sli4_sge_last, sgl, 0); 3320 sgl->word2 = cpu_to_le32(sgl->word2); 3321 3322 sgl += 1; 3323 lpfc_cmd->seg_cnt = datasegcnt; 3324 3325 /* First check if data segment count from SCSI Layer is good */ 3326 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt && 3327 !phba->cfg_xpsgl) { 3328 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); 3329 ret = 2; 3330 goto err; 3331 } 3332 3333 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); 3334 3335 switch (prot_group_type) { 3336 case LPFC_PG_TYPE_NO_DIF: 3337 /* Here we need to add a DISEED to the count */ 3338 if (((lpfc_cmd->seg_cnt + 1) > 3339 phba->cfg_total_seg_cnt) && 3340 !phba->cfg_xpsgl) { 3341 ret = 2; 3342 goto err; 3343 } 3344 3345 num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl, 3346 datasegcnt, lpfc_cmd); 3347 3348 /* we should have 2 or more entries in buffer list */ 3349 if (num_sge < 2) { 3350 ret = 2; 3351 goto err; 3352 } 3353 break; 3354 3355 case LPFC_PG_TYPE_DIF_BUF: 3356 /* 3357 * This type indicates that protection buffers are 3358 * passed to the driver, so that needs to be prepared 3359 * for DMA 3360 */ 3361 protsegcnt = dma_map_sg(&phba->pcidev->dev, 3362 scsi_prot_sglist(scsi_cmnd), 3363 scsi_prot_sg_count(scsi_cmnd), datadir); 3364 if (unlikely(!protsegcnt)) { 3365 scsi_dma_unmap(scsi_cmnd); 3366 return 1; 3367 } 3368 3369 lpfc_cmd->prot_seg_cnt = protsegcnt; 3370 /* 3371 * There is a minimun of 3 SGEs used for every 3372 * protection data segment. 
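			 * Those three are, at minimum, one DISEED, one DIF
			 * SGE and one data SGE per protection group, which is
			 * why prot_seg_cnt * 3 is compared against the total
			 * segment count just below.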
3373 */ 3374 if (((lpfc_cmd->prot_seg_cnt * 3) > 3375 (phba->cfg_total_seg_cnt - 2)) && 3376 !phba->cfg_xpsgl) { 3377 ret = 2; 3378 goto err; 3379 } 3380 3381 num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl, 3382 datasegcnt, protsegcnt, lpfc_cmd); 3383 3384 /* we should have 3 or more entries in buffer list */ 3385 if (num_sge < 3 || 3386 (num_sge > phba->cfg_total_seg_cnt && 3387 !phba->cfg_xpsgl)) { 3388 ret = 2; 3389 goto err; 3390 } 3391 break; 3392 3393 case LPFC_PG_TYPE_INVALID: 3394 default: 3395 scsi_dma_unmap(scsi_cmnd); 3396 lpfc_cmd->seg_cnt = 0; 3397 3398 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3399 "9083 Unexpected protection group %i\n", 3400 prot_group_type); 3401 return 2; 3402 } 3403 } 3404 3405 switch (scsi_get_prot_op(scsi_cmnd)) { 3406 case SCSI_PROT_WRITE_STRIP: 3407 case SCSI_PROT_READ_STRIP: 3408 lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_STRIP; 3409 break; 3410 case SCSI_PROT_WRITE_INSERT: 3411 case SCSI_PROT_READ_INSERT: 3412 lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_INSERT; 3413 break; 3414 case SCSI_PROT_WRITE_PASS: 3415 case SCSI_PROT_READ_PASS: 3416 lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_PASS; 3417 break; 3418 } 3419 3420 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); 3421 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); 3422 3423 /* Set first-burst provided it was successfully negotiated */ 3424 if (!(phba->hba_flag & HBA_FCOE_MODE) && 3425 vport->cfg_first_burst_size && 3426 scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) { 3427 u32 init_len, total_len; 3428 3429 total_len = be32_to_cpu(fcp_cmnd->fcpDl); 3430 init_len = min(total_len, vport->cfg_first_burst_size); 3431 3432 /* Word 4 & 5 */ 3433 wqe->fcp_iwrite.initial_xfer_len = init_len; 3434 wqe->fcp_iwrite.total_xfer_len = total_len; 3435 } else { 3436 /* Word 4 */ 3437 wqe->fcp_iwrite.total_xfer_len = 3438 be32_to_cpu(fcp_cmnd->fcpDl); 3439 } 3440 3441 /* 3442 * If the OAS driver feature is enabled and the lun is enabled for 3443 * OAS, set the oas iocb related flags. 3444 */ 3445 if ((phba->cfg_fof) && ((struct lpfc_device_data *) 3446 scsi_cmnd->device->hostdata)->oas_enabled) { 3447 lpfc_cmd->cur_iocbq.cmd_flag |= (LPFC_IO_OAS | LPFC_IO_FOF); 3448 3449 /* Word 10 */ 3450 bf_set(wqe_oas, &wqe->generic.wqe_com, 1); 3451 bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1); 3452 bf_set(wqe_ccp, &wqe->generic.wqe_com, 3453 (phba->cfg_XLanePriority << 1)); 3454 } 3455 3456 /* Word 7. 
DIF Flags */ 3457 if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_PASS) 3458 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU); 3459 else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_STRIP) 3460 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP); 3461 else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_INSERT) 3462 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT); 3463 3464 lpfc_cmd->cur_iocbq.cmd_flag &= ~(LPFC_IO_DIF_PASS | 3465 LPFC_IO_DIF_STRIP | LPFC_IO_DIF_INSERT); 3466 3467 return 0; 3468 err: 3469 if (lpfc_cmd->seg_cnt) 3470 scsi_dma_unmap(scsi_cmnd); 3471 if (lpfc_cmd->prot_seg_cnt) 3472 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), 3473 scsi_prot_sg_count(scsi_cmnd), 3474 scsi_cmnd->sc_data_direction); 3475 3476 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3477 "9084 Cannot setup S/G List for HBA " 3478 "IO segs %d/%d SGL %d SCSI %d: %d %d %d\n", 3479 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, 3480 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, 3481 prot_group_type, num_sge, ret); 3482 3483 lpfc_cmd->seg_cnt = 0; 3484 lpfc_cmd->prot_seg_cnt = 0; 3485 return ret; 3486 } 3487 3488 /** 3489 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer 3490 * @phba: The Hba for which this call is being executed. 3491 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3492 * 3493 * This routine wraps the actual DMA mapping function pointer from the 3494 * lpfc_hba struct. 3495 * 3496 * Return codes: 3497 * 1 - Error 3498 * 0 - Success 3499 **/ 3500 static inline int 3501 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) 3502 { 3503 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); 3504 } 3505 3506 /** 3507 * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer 3508 * using BlockGuard. 3509 * @phba: The Hba for which this call is being executed. 3510 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3511 * 3512 * This routine wraps the actual DMA mapping function pointer from the 3513 * lpfc_hba struct. 3514 * 3515 * Return codes: 3516 * 1 - Error 3517 * 0 - Success 3518 **/ 3519 static inline int 3520 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) 3521 { 3522 return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 3523 } 3524 3525 /** 3526 * lpfc_scsi_prep_cmnd_buf - Wrapper function for IOCB/WQE mapping of scsi 3527 * buffer 3528 * @vport: Pointer to vport object. 3529 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3530 * @tmo: Timeout value for IO 3531 * 3532 * This routine initializes IOCB/WQE data structure from scsi command 3533 * 3534 * Return codes: 3535 * 1 - Error 3536 * 0 - Success 3537 **/ 3538 static inline int 3539 lpfc_scsi_prep_cmnd_buf(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, 3540 uint8_t tmo) 3541 { 3542 return vport->phba->lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, tmo); 3543 } 3544 3545 /** 3546 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error 3547 * @phba: Pointer to hba context object. 3548 * @vport: Pointer to vport object. 3549 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error. 3550 * @fcpi_parm: FCP Initiator parameter. 3551 * 3552 * This function posts an event when there is a SCSI command reporting 3553 * error from the scsi device. 
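 *
 * Typical triggers, per the branches below, are a SAM_STAT_TASK_SET_FULL
 * or SAM_STAT_BUSY status, a CHECK CONDITION with valid sense data on a
 * READ_10/WRITE_10, or an FCP read-check (residual) mismatch.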
3554 **/ 3555 static void 3556 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport, 3557 struct lpfc_io_buf *lpfc_cmd, uint32_t fcpi_parm) { 3558 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; 3559 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; 3560 uint32_t resp_info = fcprsp->rspStatus2; 3561 uint32_t scsi_status = fcprsp->rspStatus3; 3562 struct lpfc_fast_path_event *fast_path_evt = NULL; 3563 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode; 3564 unsigned long flags; 3565 3566 if (!pnode) 3567 return; 3568 3569 /* If there is queuefull or busy condition send a scsi event */ 3570 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) || 3571 (cmnd->result == SAM_STAT_BUSY)) { 3572 fast_path_evt = lpfc_alloc_fast_evt(phba); 3573 if (!fast_path_evt) 3574 return; 3575 fast_path_evt->un.scsi_evt.event_type = 3576 FC_REG_SCSI_EVENT; 3577 fast_path_evt->un.scsi_evt.subcategory = 3578 (cmnd->result == SAM_STAT_TASK_SET_FULL) ? 3579 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY; 3580 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun; 3581 memcpy(&fast_path_evt->un.scsi_evt.wwpn, 3582 &pnode->nlp_portname, sizeof(struct lpfc_name)); 3583 memcpy(&fast_path_evt->un.scsi_evt.wwnn, 3584 &pnode->nlp_nodename, sizeof(struct lpfc_name)); 3585 } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen && 3586 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) { 3587 fast_path_evt = lpfc_alloc_fast_evt(phba); 3588 if (!fast_path_evt) 3589 return; 3590 fast_path_evt->un.check_cond_evt.scsi_event.event_type = 3591 FC_REG_SCSI_EVENT; 3592 fast_path_evt->un.check_cond_evt.scsi_event.subcategory = 3593 LPFC_EVENT_CHECK_COND; 3594 fast_path_evt->un.check_cond_evt.scsi_event.lun = 3595 cmnd->device->lun; 3596 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn, 3597 &pnode->nlp_portname, sizeof(struct lpfc_name)); 3598 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn, 3599 &pnode->nlp_nodename, sizeof(struct lpfc_name)); 3600 fast_path_evt->un.check_cond_evt.sense_key = 3601 cmnd->sense_buffer[2] & 0xf; 3602 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12]; 3603 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13]; 3604 } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) && 3605 fcpi_parm && 3606 ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) || 3607 ((scsi_status == SAM_STAT_GOOD) && 3608 !(resp_info & (RESID_UNDER | RESID_OVER))))) { 3609 /* 3610 * If status is good or resid does not match with fcp_param and 3611 * there is valid fcpi_parm, then there is a read_check error 3612 */ 3613 fast_path_evt = lpfc_alloc_fast_evt(phba); 3614 if (!fast_path_evt) 3615 return; 3616 fast_path_evt->un.read_check_error.header.event_type = 3617 FC_REG_FABRIC_EVENT; 3618 fast_path_evt->un.read_check_error.header.subcategory = 3619 LPFC_EVENT_FCPRDCHKERR; 3620 memcpy(&fast_path_evt->un.read_check_error.header.wwpn, 3621 &pnode->nlp_portname, sizeof(struct lpfc_name)); 3622 memcpy(&fast_path_evt->un.read_check_error.header.wwnn, 3623 &pnode->nlp_nodename, sizeof(struct lpfc_name)); 3624 fast_path_evt->un.read_check_error.lun = cmnd->device->lun; 3625 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0]; 3626 fast_path_evt->un.read_check_error.fcpiparam = 3627 fcpi_parm; 3628 } else 3629 return; 3630 3631 fast_path_evt->vport = vport; 3632 spin_lock_irqsave(&phba->hbalock, flags); 3633 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list); 3634 spin_unlock_irqrestore(&phba->hbalock, flags); 3635 lpfc_worker_wake_up(phba); 3636 return; 3637 } 3638 3639 /** 3640 
* lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev 3641 * @phba: The HBA for which this call is being executed. 3642 * @psb: The scsi buffer which is going to be un-mapped. 3643 * 3644 * This routine does DMA un-mapping of scatter gather list of scsi command 3645 * field of @lpfc_cmd for device with SLI-3 interface spec. 3646 **/ 3647 static void 3648 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb) 3649 { 3650 /* 3651 * There are only two special cases to consider. (1) the scsi command 3652 * requested scatter-gather usage or (2) the scsi command allocated 3653 * a request buffer, but did not request use_sg. There is a third 3654 * case, but it does not require resource deallocation. 3655 */ 3656 if (psb->seg_cnt > 0) 3657 scsi_dma_unmap(psb->pCmd); 3658 if (psb->prot_seg_cnt > 0) 3659 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd), 3660 scsi_prot_sg_count(psb->pCmd), 3661 psb->pCmd->sc_data_direction); 3662 } 3663 3664 /** 3665 * lpfc_unblock_requests - allow further commands to be queued. 3666 * @phba: pointer to phba object 3667 * 3668 * For single vport, just call scsi_unblock_requests on physical port. 3669 * For multiple vports, send scsi_unblock_requests for all the vports. 3670 */ 3671 void 3672 lpfc_unblock_requests(struct lpfc_hba *phba) 3673 { 3674 struct lpfc_vport **vports; 3675 struct Scsi_Host *shost; 3676 int i; 3677 3678 if (phba->sli_rev == LPFC_SLI_REV4 && 3679 !phba->sli4_hba.max_cfg_param.vpi_used) { 3680 shost = lpfc_shost_from_vport(phba->pport); 3681 scsi_unblock_requests(shost); 3682 return; 3683 } 3684 3685 vports = lpfc_create_vport_work_array(phba); 3686 if (vports != NULL) 3687 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3688 shost = lpfc_shost_from_vport(vports[i]); 3689 scsi_unblock_requests(shost); 3690 } 3691 lpfc_destroy_vport_work_array(phba, vports); 3692 } 3693 3694 /** 3695 * lpfc_block_requests - prevent further commands from being queued. 3696 * @phba: pointer to phba object 3697 * 3698 * For single vport, just call scsi_block_requests on physical port. 3699 * For multiple vports, send scsi_block_requests for all the vports. 3700 */ 3701 void 3702 lpfc_block_requests(struct lpfc_hba *phba) 3703 { 3704 struct lpfc_vport **vports; 3705 struct Scsi_Host *shost; 3706 int i; 3707 3708 if (atomic_read(&phba->cmf_stop_io)) 3709 return; 3710 3711 if (phba->sli_rev == LPFC_SLI_REV4 && 3712 !phba->sli4_hba.max_cfg_param.vpi_used) { 3713 shost = lpfc_shost_from_vport(phba->pport); 3714 scsi_block_requests(shost); 3715 return; 3716 } 3717 3718 vports = lpfc_create_vport_work_array(phba); 3719 if (vports != NULL) 3720 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3721 shost = lpfc_shost_from_vport(vports[i]); 3722 scsi_block_requests(shost); 3723 } 3724 lpfc_destroy_vport_work_array(phba, vports); 3725 } 3726 3727 /** 3728 * lpfc_update_cmf_cmpl - Adjust CMF counters for IO completion 3729 * @phba: The HBA for which this call is being executed. 3730 * @time: The latency of the IO that completed (in ns) 3731 * @size: The size of the IO that completed 3732 * @shost: SCSI host the IO completed on (NULL for a NVME IO) 3733 * 3734 * The routine adjusts the various Burst and Bandwidth counters used in 3735 * Congestion management and E2E. If time is set to LPFC_CGN_NOT_SENT, 3736 * that means the IO was never issued to the HBA, so this routine is 3737 * just being called to cleanup the counter from a previous 3738 * lpfc_update_cmf_cmd call. 
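 *
 * The latency argument arrives in nanoseconds and is accumulated in
 * microseconds, rounded to nearest: for example 1499 ns becomes
 * (1499 + 500) / 1000 = 1 us and 1500 ns becomes 2 us, while anything
 * under 1000 ns is recorded as 1 us.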
3739 */ 3740 int 3741 lpfc_update_cmf_cmpl(struct lpfc_hba *phba, 3742 uint64_t time, uint32_t size, struct Scsi_Host *shost) 3743 { 3744 struct lpfc_cgn_stat *cgs; 3745 3746 if (time != LPFC_CGN_NOT_SENT) { 3747 /* lat is ns coming in, save latency in us */ 3748 if (time < 1000) 3749 time = 1; 3750 else 3751 time = div_u64(time + 500, 1000); /* round it */ 3752 3753 cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id()); 3754 atomic64_add(size, &cgs->rcv_bytes); 3755 atomic64_add(time, &cgs->rx_latency); 3756 atomic_inc(&cgs->rx_io_cnt); 3757 } 3758 return 0; 3759 } 3760 3761 /** 3762 * lpfc_update_cmf_cmd - Adjust CMF counters for IO submission 3763 * @phba: The HBA for which this call is being executed. 3764 * @size: The size of the IO that will be issued 3765 * 3766 * The routine adjusts the various Burst and Bandwidth counters used in 3767 * Congestion management and E2E. 3768 */ 3769 int 3770 lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t size) 3771 { 3772 uint64_t total; 3773 struct lpfc_cgn_stat *cgs; 3774 int cpu; 3775 3776 /* At this point we are either LPFC_CFG_MANAGED or LPFC_CFG_MONITOR */ 3777 if (phba->cmf_active_mode == LPFC_CFG_MANAGED && 3778 phba->cmf_max_bytes_per_interval) { 3779 total = 0; 3780 for_each_present_cpu(cpu) { 3781 cgs = per_cpu_ptr(phba->cmf_stat, cpu); 3782 total += atomic64_read(&cgs->total_bytes); 3783 } 3784 if (total >= phba->cmf_max_bytes_per_interval) { 3785 if (!atomic_xchg(&phba->cmf_bw_wait, 1)) { 3786 lpfc_block_requests(phba); 3787 phba->cmf_last_ts = 3788 lpfc_calc_cmf_latency(phba); 3789 } 3790 atomic_inc(&phba->cmf_busy); 3791 return -EBUSY; 3792 } 3793 if (size > atomic_read(&phba->rx_max_read_cnt)) 3794 atomic_set(&phba->rx_max_read_cnt, size); 3795 } 3796 3797 cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id()); 3798 atomic64_add(size, &cgs->total_bytes); 3799 return 0; 3800 } 3801 3802 /** 3803 * lpfc_handle_fcp_err - FCP response handler 3804 * @vport: The virtual port for which this call is being executed. 3805 * @lpfc_cmd: Pointer to lpfc_io_buf data structure. 3806 * @fcpi_parm: FCP Initiator parameter. 3807 * 3808 * This routine is called to process response IOCB with status field 3809 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command 3810 * based upon SCSI and FCP error. 3811 **/ 3812 static void 3813 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, 3814 uint32_t fcpi_parm) 3815 { 3816 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; 3817 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd; 3818 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; 3819 uint32_t resp_info = fcprsp->rspStatus2; 3820 uint32_t scsi_status = fcprsp->rspStatus3; 3821 uint32_t *lp; 3822 uint32_t host_status = DID_OK; 3823 uint32_t rsplen = 0; 3824 uint32_t fcpDl; 3825 uint32_t logit = LOG_FCP | LOG_FCP_ERROR; 3826 3827 3828 /* 3829 * If this is a task management command, there is no 3830 * scsi packet associated with this lpfc_cmd. The driver 3831 * consumes it. 
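 * For example, a LUN reset prepared by the task management path carries
 * FCP_LUN_RESET in fcpCntl2, so the check below zeroes scsi_status and
 * jumps straight to the exit label (illustrative reading of the code that
 * follows, not new behavior).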
3832 */ 3833 if (fcpcmd->fcpCntl2) { 3834 scsi_status = 0; 3835 goto out; 3836 } 3837 3838 if (resp_info & RSP_LEN_VALID) { 3839 rsplen = be32_to_cpu(fcprsp->rspRspLen); 3840 if (rsplen != 0 && rsplen != 4 && rsplen != 8) { 3841 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3842 "2719 Invalid response length: " 3843 "tgt x%x lun x%llx cmnd x%x rsplen " 3844 "x%x\n", cmnd->device->id, 3845 cmnd->device->lun, cmnd->cmnd[0], 3846 rsplen); 3847 host_status = DID_ERROR; 3848 goto out; 3849 } 3850 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) { 3851 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3852 "2757 Protocol failure detected during " 3853 "processing of FCP I/O op: " 3854 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n", 3855 cmnd->device->id, 3856 cmnd->device->lun, cmnd->cmnd[0], 3857 fcprsp->rspInfo3); 3858 host_status = DID_ERROR; 3859 goto out; 3860 } 3861 } 3862 3863 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) { 3864 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen); 3865 if (snslen > SCSI_SENSE_BUFFERSIZE) 3866 snslen = SCSI_SENSE_BUFFERSIZE; 3867 3868 if (resp_info & RSP_LEN_VALID) 3869 rsplen = be32_to_cpu(fcprsp->rspRspLen); 3870 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen); 3871 } 3872 lp = (uint32_t *)cmnd->sense_buffer; 3873 3874 /* special handling for under run conditions */ 3875 if (!scsi_status && (resp_info & RESID_UNDER)) { 3876 /* don't log under runs if fcp set... */ 3877 if (vport->cfg_log_verbose & LOG_FCP) 3878 logit = LOG_FCP_ERROR; 3879 /* unless operator says so */ 3880 if (vport->cfg_log_verbose & LOG_FCP_UNDER) 3881 logit = LOG_FCP_UNDER; 3882 } 3883 3884 lpfc_printf_vlog(vport, KERN_WARNING, logit, 3885 "9024 FCP command x%x failed: x%x SNS x%x x%x " 3886 "Data: x%x x%x x%x x%x x%x\n", 3887 cmnd->cmnd[0], scsi_status, 3888 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info, 3889 be32_to_cpu(fcprsp->rspResId), 3890 be32_to_cpu(fcprsp->rspSnsLen), 3891 be32_to_cpu(fcprsp->rspRspLen), 3892 fcprsp->rspInfo3); 3893 3894 scsi_set_resid(cmnd, 0); 3895 fcpDl = be32_to_cpu(fcpcmd->fcpDl); 3896 if (resp_info & RESID_UNDER) { 3897 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId)); 3898 3899 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER, 3900 "9025 FCP Underrun, expected %d, " 3901 "residual %d Data: x%x x%x x%x\n", 3902 fcpDl, 3903 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0], 3904 cmnd->underflow); 3905 3906 /* 3907 * If there is an under run, check if under run reported by 3908 * storage array is same as the under run reported by HBA. 3909 * If this is not same, there is a dropped frame. 3910 */ 3911 if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) { 3912 lpfc_printf_vlog(vport, KERN_WARNING, 3913 LOG_FCP | LOG_FCP_ERROR, 3914 "9026 FCP Read Check Error " 3915 "and Underrun Data: x%x x%x x%x x%x\n", 3916 fcpDl, 3917 scsi_get_resid(cmnd), fcpi_parm, 3918 cmnd->cmnd[0]); 3919 scsi_set_resid(cmnd, scsi_bufflen(cmnd)); 3920 host_status = DID_ERROR; 3921 } 3922 /* 3923 * The cmnd->underflow is the minimum number of bytes that must 3924 * be transferred for this command. Provided a sense condition 3925 * is not present, make sure the actual amount transferred is at 3926 * least the underflow value or fail. 
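 * For instance (editor's illustration only): a READ with scsi_bufflen()
 * of 4096, cmnd->underflow of 4096, SAM_STAT_GOOD status and a reported
 * residual of 2048 transferred only 2048 bytes, so the check below
 * converts the otherwise good completion to DID_ERROR.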
3927 */ 3928 if (!(resp_info & SNS_LEN_VALID) && 3929 (scsi_status == SAM_STAT_GOOD) && 3930 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) 3931 < cmnd->underflow)) { 3932 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 3933 "9027 FCP command x%x residual " 3934 "underrun converted to error " 3935 "Data: x%x x%x x%x\n", 3936 cmnd->cmnd[0], scsi_bufflen(cmnd), 3937 scsi_get_resid(cmnd), cmnd->underflow); 3938 host_status = DID_ERROR; 3939 } 3940 } else if (resp_info & RESID_OVER) { 3941 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 3942 "9028 FCP command x%x residual overrun error. " 3943 "Data: x%x x%x\n", cmnd->cmnd[0], 3944 scsi_bufflen(cmnd), scsi_get_resid(cmnd)); 3945 host_status = DID_ERROR; 3946 3947 /* 3948 * Check SLI validation that all the transfer was actually done 3949 * (fcpi_parm should be zero). Apply check only to reads. 3950 */ 3951 } else if (fcpi_parm) { 3952 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, 3953 "9029 FCP %s Check Error Data: " 3954 "x%x x%x x%x x%x x%x\n", 3955 ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ? 3956 "Read" : "Write"), 3957 fcpDl, be32_to_cpu(fcprsp->rspResId), 3958 fcpi_parm, cmnd->cmnd[0], scsi_status); 3959 3960 /* There is some issue with the LPe12000 that causes it 3961 * to miscalculate the fcpi_parm and falsely trip this 3962 * recovery logic. Detect this case and don't error when true. 3963 */ 3964 if (fcpi_parm > fcpDl) 3965 goto out; 3966 3967 switch (scsi_status) { 3968 case SAM_STAT_GOOD: 3969 case SAM_STAT_CHECK_CONDITION: 3970 /* Fabric dropped a data frame. Fail any successful 3971 * command in which we detected dropped frames. 3972 * A status of good or some check conditions could 3973 * be considered a successful command. 3974 */ 3975 host_status = DID_ERROR; 3976 break; 3977 } 3978 scsi_set_resid(cmnd, scsi_bufflen(cmnd)); 3979 } 3980 3981 out: 3982 cmnd->result = host_status << 16 | scsi_status; 3983 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, fcpi_parm); 3984 } 3985 3986 /** 3987 * lpfc_fcp_io_cmd_wqe_cmpl - Complete a FCP IO 3988 * @phba: The hba for which this call is being executed. 3989 * @pwqeIn: The command WQE for the scsi cmnd. 3990 * @pwqeOut: Pointer to driver response WQE object. 3991 * 3992 * This routine assigns scsi command result by looking into response WQE 3993 * status field appropriately. This routine handles QUEUE FULL condition as 3994 * well by ramping down device queue depth. 3995 **/ 3996 static void 3997 lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, 3998 struct lpfc_iocbq *pwqeOut) 3999 { 4000 struct lpfc_io_buf *lpfc_cmd = pwqeIn->io_buf; 4001 struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl; 4002 struct lpfc_vport *vport = pwqeIn->vport; 4003 struct lpfc_rport_data *rdata; 4004 struct lpfc_nodelist *ndlp; 4005 struct scsi_cmnd *cmd; 4006 unsigned long flags; 4007 struct lpfc_fast_path_event *fast_path_evt; 4008 struct Scsi_Host *shost; 4009 u32 logit = LOG_FCP; 4010 u32 idx; 4011 u32 lat; 4012 u8 wait_xb_clr = 0; 4013 4014 /* Sanity check on return of outstanding command */ 4015 if (!lpfc_cmd) { 4016 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4017 "9032 Null lpfc_cmd pointer. 
No " 4018 "release, skip completion\n"); 4019 return; 4020 } 4021 4022 rdata = lpfc_cmd->rdata; 4023 ndlp = rdata->pnode; 4024 4025 /* Sanity check on return of outstanding command */ 4026 cmd = lpfc_cmd->pCmd; 4027 if (!cmd) { 4028 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4029 "9042 I/O completion: Not an active IO\n"); 4030 lpfc_release_scsi_buf(phba, lpfc_cmd); 4031 return; 4032 } 4033 /* Guard against abort handler being called at same time */ 4034 spin_lock(&lpfc_cmd->buf_lock); 4035 idx = lpfc_cmd->cur_iocbq.hba_wqidx; 4036 if (phba->sli4_hba.hdwq) 4037 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++; 4038 4039 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4040 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) 4041 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); 4042 #endif 4043 shost = cmd->device->host; 4044 4045 lpfc_cmd->status = bf_get(lpfc_wcqe_c_status, wcqe); 4046 lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK); 4047 4048 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; 4049 if (bf_get(lpfc_wcqe_c_xb, wcqe)) { 4050 lpfc_cmd->flags |= LPFC_SBUF_XBUSY; 4051 if (phba->cfg_fcp_wait_abts_rsp) 4052 wait_xb_clr = 1; 4053 } 4054 4055 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4056 if (lpfc_cmd->prot_data_type) { 4057 struct scsi_dif_tuple *src = NULL; 4058 4059 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment; 4060 /* 4061 * Used to restore any changes to protection 4062 * data for error injection. 4063 */ 4064 switch (lpfc_cmd->prot_data_type) { 4065 case LPFC_INJERR_REFTAG: 4066 src->ref_tag = 4067 lpfc_cmd->prot_data; 4068 break; 4069 case LPFC_INJERR_APPTAG: 4070 src->app_tag = 4071 (uint16_t)lpfc_cmd->prot_data; 4072 break; 4073 case LPFC_INJERR_GUARD: 4074 src->guard_tag = 4075 (uint16_t)lpfc_cmd->prot_data; 4076 break; 4077 default: 4078 break; 4079 } 4080 4081 lpfc_cmd->prot_data = 0; 4082 lpfc_cmd->prot_data_type = 0; 4083 lpfc_cmd->prot_data_segment = NULL; 4084 } 4085 #endif 4086 if (unlikely(lpfc_cmd->status)) { 4087 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR && 4088 !lpfc_cmd->fcp_rsp->rspStatus3 && 4089 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) && 4090 !(vport->cfg_log_verbose & LOG_FCP_UNDER)) 4091 logit = 0; 4092 else 4093 logit = LOG_FCP | LOG_FCP_UNDER; 4094 lpfc_printf_vlog(vport, KERN_WARNING, logit, 4095 "9034 FCP cmd x%x failed <%d/%lld> " 4096 "status: x%x result: x%x " 4097 "sid: x%x did: x%x oxid: x%x " 4098 "Data: x%x x%x x%x\n", 4099 cmd->cmnd[0], 4100 cmd->device ? cmd->device->id : 0xffff, 4101 cmd->device ? cmd->device->lun : 0xffff, 4102 lpfc_cmd->status, lpfc_cmd->result, 4103 vport->fc_myDID, 4104 (ndlp) ? ndlp->nlp_DID : 0, 4105 lpfc_cmd->cur_iocbq.sli4_xritag, 4106 wcqe->parameter, wcqe->total_data_placed, 4107 lpfc_cmd->cur_iocbq.iotag); 4108 } 4109 4110 switch (lpfc_cmd->status) { 4111 case CQE_STATUS_SUCCESS: 4112 cmd->result = DID_OK << 16; 4113 break; 4114 case CQE_STATUS_FCP_RSP_FAILURE: 4115 lpfc_handle_fcp_err(vport, lpfc_cmd, 4116 pwqeIn->wqe.fcp_iread.total_xfer_len - 4117 wcqe->total_data_placed); 4118 break; 4119 case CQE_STATUS_NPORT_BSY: 4120 case CQE_STATUS_FABRIC_BSY: 4121 cmd->result = DID_TRANSPORT_DISRUPTED << 16; 4122 fast_path_evt = lpfc_alloc_fast_evt(phba); 4123 if (!fast_path_evt) 4124 break; 4125 fast_path_evt->un.fabric_evt.event_type = 4126 FC_REG_FABRIC_EVENT; 4127 fast_path_evt->un.fabric_evt.subcategory = 4128 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ? 
4129 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY; 4130 if (ndlp) { 4131 memcpy(&fast_path_evt->un.fabric_evt.wwpn, 4132 &ndlp->nlp_portname, 4133 sizeof(struct lpfc_name)); 4134 memcpy(&fast_path_evt->un.fabric_evt.wwnn, 4135 &ndlp->nlp_nodename, 4136 sizeof(struct lpfc_name)); 4137 } 4138 fast_path_evt->vport = vport; 4139 fast_path_evt->work_evt.evt = 4140 LPFC_EVT_FASTPATH_MGMT_EVT; 4141 spin_lock_irqsave(&phba->hbalock, flags); 4142 list_add_tail(&fast_path_evt->work_evt.evt_listp, 4143 &phba->work_list); 4144 spin_unlock_irqrestore(&phba->hbalock, flags); 4145 lpfc_worker_wake_up(phba); 4146 lpfc_printf_vlog(vport, KERN_WARNING, logit, 4147 "9035 Fabric/Node busy FCP cmd x%x failed" 4148 " <%d/%lld> " 4149 "status: x%x result: x%x " 4150 "sid: x%x did: x%x oxid: x%x " 4151 "Data: x%x x%x x%x\n", 4152 cmd->cmnd[0], 4153 cmd->device ? cmd->device->id : 0xffff, 4154 cmd->device ? cmd->device->lun : 0xffff, 4155 lpfc_cmd->status, lpfc_cmd->result, 4156 vport->fc_myDID, 4157 (ndlp) ? ndlp->nlp_DID : 0, 4158 lpfc_cmd->cur_iocbq.sli4_xritag, 4159 wcqe->parameter, 4160 wcqe->total_data_placed, 4161 lpfc_cmd->cur_iocbq.iocb.ulpIoTag); 4162 break; 4163 case CQE_STATUS_DI_ERROR: 4164 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe)) 4165 lpfc_cmd->result = IOERR_RX_DMA_FAILED; 4166 else 4167 lpfc_cmd->result = IOERR_TX_DMA_FAILED; 4168 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_BG, 4169 "9048 DI Error xri x%x status x%x DI ext " 4170 "status x%x data placed x%x\n", 4171 lpfc_cmd->cur_iocbq.sli4_xritag, 4172 lpfc_cmd->status, wcqe->parameter, 4173 wcqe->total_data_placed); 4174 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { 4175 /* BG enabled cmd. Parse BG error */ 4176 lpfc_parse_bg_err(phba, lpfc_cmd, pwqeOut); 4177 break; 4178 } 4179 cmd->result = DID_ERROR << 16; 4180 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 4181 "9040 DI Error on unprotected cmd\n"); 4182 break; 4183 case CQE_STATUS_REMOTE_STOP: 4184 if (ndlp) { 4185 /* This I/O was aborted by the target, we don't 4186 * know the rxid and because we did not send the 4187 * ABTS we cannot generate and RRQ. 4188 */ 4189 lpfc_set_rrq_active(phba, ndlp, 4190 lpfc_cmd->cur_iocbq.sli4_lxritag, 4191 0, 0); 4192 } 4193 fallthrough; 4194 case CQE_STATUS_LOCAL_REJECT: 4195 if (lpfc_cmd->result & IOERR_DRVR_MASK) 4196 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 4197 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR || 4198 lpfc_cmd->result == 4199 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR || 4200 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR || 4201 lpfc_cmd->result == 4202 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) { 4203 cmd->result = DID_NO_CONNECT << 16; 4204 break; 4205 } 4206 if (lpfc_cmd->result == IOERR_INVALID_RPI || 4207 lpfc_cmd->result == IOERR_LINK_DOWN || 4208 lpfc_cmd->result == IOERR_NO_RESOURCES || 4209 lpfc_cmd->result == IOERR_ABORT_REQUESTED || 4210 lpfc_cmd->result == IOERR_RPI_SUSPENDED || 4211 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) { 4212 cmd->result = DID_TRANSPORT_DISRUPTED << 16; 4213 break; 4214 } 4215 lpfc_printf_vlog(vport, KERN_WARNING, logit, 4216 "9036 Local Reject FCP cmd x%x failed" 4217 " <%d/%lld> " 4218 "status: x%x result: x%x " 4219 "sid: x%x did: x%x oxid: x%x " 4220 "Data: x%x x%x x%x\n", 4221 cmd->cmnd[0], 4222 cmd->device ? cmd->device->id : 0xffff, 4223 cmd->device ? cmd->device->lun : 0xffff, 4224 lpfc_cmd->status, lpfc_cmd->result, 4225 vport->fc_myDID, 4226 (ndlp) ? 
ndlp->nlp_DID : 0, 4227 lpfc_cmd->cur_iocbq.sli4_xritag, 4228 wcqe->parameter, 4229 wcqe->total_data_placed, 4230 lpfc_cmd->cur_iocbq.iocb.ulpIoTag); 4231 fallthrough; 4232 default: 4233 cmd->result = DID_ERROR << 16; 4234 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 4235 "9037 FCP Completion Error: xri %x " 4236 "status x%x result x%x [x%x] " 4237 "placed x%x\n", 4238 lpfc_cmd->cur_iocbq.sli4_xritag, 4239 lpfc_cmd->status, lpfc_cmd->result, 4240 wcqe->parameter, 4241 wcqe->total_data_placed); 4242 } 4243 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) { 4244 u32 *lp = (u32 *)cmd->sense_buffer; 4245 4246 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 4247 "9039 Iodone <%d/%llu> cmd x%px, error " 4248 "x%x SNS x%x x%x LBA x%llx Data: x%x x%x\n", 4249 cmd->device->id, cmd->device->lun, cmd, 4250 cmd->result, *lp, *(lp + 3), 4251 (cmd->device->sector_size) ? 4252 (u64)scsi_get_lba(cmd) : 0, 4253 cmd->retries, scsi_get_resid(cmd)); 4254 } 4255 4256 if (vport->cfg_max_scsicmpl_time && 4257 time_after(jiffies, lpfc_cmd->start_time + 4258 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { 4259 spin_lock_irqsave(shost->host_lock, flags); 4260 if (ndlp) { 4261 if (ndlp->cmd_qdepth > 4262 atomic_read(&ndlp->cmd_pending) && 4263 (atomic_read(&ndlp->cmd_pending) > 4264 LPFC_MIN_TGT_QDEPTH) && 4265 (cmd->cmnd[0] == READ_10 || 4266 cmd->cmnd[0] == WRITE_10)) 4267 ndlp->cmd_qdepth = 4268 atomic_read(&ndlp->cmd_pending); 4269 4270 ndlp->last_change_time = jiffies; 4271 } 4272 spin_unlock_irqrestore(shost->host_lock, flags); 4273 } 4274 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 4275 4276 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4277 if (lpfc_cmd->ts_cmd_start) { 4278 lpfc_cmd->ts_isr_cmpl = lpfc_cmd->cur_iocbq.isr_timestamp; 4279 lpfc_cmd->ts_data_io = ktime_get_ns(); 4280 phba->ktime_last_cmd = lpfc_cmd->ts_data_io; 4281 lpfc_io_ktime(phba, lpfc_cmd); 4282 } 4283 #endif 4284 if (likely(!wait_xb_clr)) 4285 lpfc_cmd->pCmd = NULL; 4286 spin_unlock(&lpfc_cmd->buf_lock); 4287 4288 /* Check if IO qualified for CMF */ 4289 if (phba->cmf_active_mode != LPFC_CFG_OFF && 4290 cmd->sc_data_direction == DMA_FROM_DEVICE && 4291 (scsi_sg_count(cmd))) { 4292 /* Used when calculating average latency */ 4293 lat = ktime_get_ns() - lpfc_cmd->rx_cmd_start; 4294 lpfc_update_cmf_cmpl(phba, lat, scsi_bufflen(cmd), shost); 4295 } 4296 4297 if (wait_xb_clr) 4298 goto out; 4299 4300 /* The sdev is not guaranteed to be valid post scsi_done upcall. */ 4301 scsi_done(cmd); 4302 4303 /* 4304 * If there is an abort thread waiting for command completion 4305 * wake up the thread. 4306 */ 4307 spin_lock(&lpfc_cmd->buf_lock); 4308 lpfc_cmd->cur_iocbq.cmd_flag &= ~LPFC_DRIVER_ABORTED; 4309 if (lpfc_cmd->waitq) 4310 wake_up(lpfc_cmd->waitq); 4311 spin_unlock(&lpfc_cmd->buf_lock); 4312 out: 4313 lpfc_release_scsi_buf(phba, lpfc_cmd); 4314 } 4315 4316 /** 4317 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine 4318 * @phba: The Hba for which this call is being executed. 4319 * @pIocbIn: The command IOCBQ for the scsi cmnd. 4320 * @pIocbOut: The response IOCBQ for the scsi cmnd. 4321 * 4322 * This routine assigns scsi command result by looking into response IOCB 4323 * status field appropriately. This routine handles QUEUE FULL condition as 4324 * well by ramping down device queue depth. 
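 *
 * As an illustration of the mapping performed below (a summary of the
 * switch statement, not additional behavior): IOSTAT_NPORT_BSY and
 * IOSTAT_FABRIC_BSY become DID_TRANSPORT_DISRUPTED, an IOSTAT_LOCAL_REJECT
 * with IOERR_NO_RESOURCES is also reported as DID_TRANSPORT_DISRUPTED,
 * other failures default to DID_ERROR, and a node that is no longer
 * mapped gets DID_TRANSPORT_DISRUPTED with SAM_STAT_BUSY.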
4325 **/ 4326 static void 4327 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, 4328 struct lpfc_iocbq *pIocbOut) 4329 { 4330 struct lpfc_io_buf *lpfc_cmd = 4331 (struct lpfc_io_buf *) pIocbIn->io_buf; 4332 struct lpfc_vport *vport = pIocbIn->vport; 4333 struct lpfc_rport_data *rdata = lpfc_cmd->rdata; 4334 struct lpfc_nodelist *pnode = rdata->pnode; 4335 struct scsi_cmnd *cmd; 4336 unsigned long flags; 4337 struct lpfc_fast_path_event *fast_path_evt; 4338 struct Scsi_Host *shost; 4339 int idx; 4340 uint32_t logit = LOG_FCP; 4341 4342 /* Guard against abort handler being called at same time */ 4343 spin_lock(&lpfc_cmd->buf_lock); 4344 4345 /* Sanity check on return of outstanding command */ 4346 cmd = lpfc_cmd->pCmd; 4347 if (!cmd || !phba) { 4348 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4349 "2621 IO completion: Not an active IO\n"); 4350 spin_unlock(&lpfc_cmd->buf_lock); 4351 return; 4352 } 4353 4354 idx = lpfc_cmd->cur_iocbq.hba_wqidx; 4355 if (phba->sli4_hba.hdwq) 4356 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++; 4357 4358 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4359 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) 4360 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); 4361 #endif 4362 shost = cmd->device->host; 4363 4364 lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK); 4365 lpfc_cmd->status = pIocbOut->iocb.ulpStatus; 4366 /* pick up SLI4 exchange busy status from HBA */ 4367 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; 4368 if (pIocbOut->cmd_flag & LPFC_EXCHANGE_BUSY) 4369 lpfc_cmd->flags |= LPFC_SBUF_XBUSY; 4370 4371 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4372 if (lpfc_cmd->prot_data_type) { 4373 struct scsi_dif_tuple *src = NULL; 4374 4375 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment; 4376 /* 4377 * Used to restore any changes to protection 4378 * data for error injection. 4379 */ 4380 switch (lpfc_cmd->prot_data_type) { 4381 case LPFC_INJERR_REFTAG: 4382 src->ref_tag = 4383 lpfc_cmd->prot_data; 4384 break; 4385 case LPFC_INJERR_APPTAG: 4386 src->app_tag = 4387 (uint16_t)lpfc_cmd->prot_data; 4388 break; 4389 case LPFC_INJERR_GUARD: 4390 src->guard_tag = 4391 (uint16_t)lpfc_cmd->prot_data; 4392 break; 4393 default: 4394 break; 4395 } 4396 4397 lpfc_cmd->prot_data = 0; 4398 lpfc_cmd->prot_data_type = 0; 4399 lpfc_cmd->prot_data_segment = NULL; 4400 } 4401 #endif 4402 4403 if (unlikely(lpfc_cmd->status)) { 4404 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && 4405 (lpfc_cmd->result & IOERR_DRVR_MASK)) 4406 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 4407 else if (lpfc_cmd->status >= IOSTAT_CNT) 4408 lpfc_cmd->status = IOSTAT_DEFAULT; 4409 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR && 4410 !lpfc_cmd->fcp_rsp->rspStatus3 && 4411 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) && 4412 !(vport->cfg_log_verbose & LOG_FCP_UNDER)) 4413 logit = 0; 4414 else 4415 logit = LOG_FCP | LOG_FCP_UNDER; 4416 lpfc_printf_vlog(vport, KERN_WARNING, logit, 4417 "9030 FCP cmd x%x failed <%d/%lld> " 4418 "status: x%x result: x%x " 4419 "sid: x%x did: x%x oxid: x%x " 4420 "Data: x%x x%x\n", 4421 cmd->cmnd[0], 4422 cmd->device ? cmd->device->id : 0xffff, 4423 cmd->device ? cmd->device->lun : 0xffff, 4424 lpfc_cmd->status, lpfc_cmd->result, 4425 vport->fc_myDID, 4426 (pnode) ? pnode->nlp_DID : 0, 4427 phba->sli_rev == LPFC_SLI_REV4 ? 
4428 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff, 4429 pIocbOut->iocb.ulpContext, 4430 lpfc_cmd->cur_iocbq.iocb.ulpIoTag); 4431 4432 switch (lpfc_cmd->status) { 4433 case IOSTAT_FCP_RSP_ERROR: 4434 /* Call FCP RSP handler to determine result */ 4435 lpfc_handle_fcp_err(vport, lpfc_cmd, 4436 pIocbOut->iocb.un.fcpi.fcpi_parm); 4437 break; 4438 case IOSTAT_NPORT_BSY: 4439 case IOSTAT_FABRIC_BSY: 4440 cmd->result = DID_TRANSPORT_DISRUPTED << 16; 4441 fast_path_evt = lpfc_alloc_fast_evt(phba); 4442 if (!fast_path_evt) 4443 break; 4444 fast_path_evt->un.fabric_evt.event_type = 4445 FC_REG_FABRIC_EVENT; 4446 fast_path_evt->un.fabric_evt.subcategory = 4447 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ? 4448 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY; 4449 if (pnode) { 4450 memcpy(&fast_path_evt->un.fabric_evt.wwpn, 4451 &pnode->nlp_portname, 4452 sizeof(struct lpfc_name)); 4453 memcpy(&fast_path_evt->un.fabric_evt.wwnn, 4454 &pnode->nlp_nodename, 4455 sizeof(struct lpfc_name)); 4456 } 4457 fast_path_evt->vport = vport; 4458 fast_path_evt->work_evt.evt = 4459 LPFC_EVT_FASTPATH_MGMT_EVT; 4460 spin_lock_irqsave(&phba->hbalock, flags); 4461 list_add_tail(&fast_path_evt->work_evt.evt_listp, 4462 &phba->work_list); 4463 spin_unlock_irqrestore(&phba->hbalock, flags); 4464 lpfc_worker_wake_up(phba); 4465 break; 4466 case IOSTAT_LOCAL_REJECT: 4467 case IOSTAT_REMOTE_STOP: 4468 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR || 4469 lpfc_cmd->result == 4470 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR || 4471 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR || 4472 lpfc_cmd->result == 4473 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) { 4474 cmd->result = DID_NO_CONNECT << 16; 4475 break; 4476 } 4477 if (lpfc_cmd->result == IOERR_INVALID_RPI || 4478 lpfc_cmd->result == IOERR_NO_RESOURCES || 4479 lpfc_cmd->result == IOERR_ABORT_REQUESTED || 4480 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) { 4481 cmd->result = DID_TRANSPORT_DISRUPTED << 16; 4482 break; 4483 } 4484 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED || 4485 lpfc_cmd->result == IOERR_TX_DMA_FAILED) && 4486 pIocbOut->iocb.unsli3.sli3_bg.bgstat) { 4487 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { 4488 /* 4489 * This is a response for a BG enabled 4490 * cmd. Parse BG error 4491 */ 4492 lpfc_parse_bg_err(phba, lpfc_cmd, 4493 pIocbOut); 4494 break; 4495 } else { 4496 lpfc_printf_vlog(vport, KERN_WARNING, 4497 LOG_BG, 4498 "9031 non-zero BGSTAT " 4499 "on unprotected cmd\n"); 4500 } 4501 } 4502 if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP) 4503 && (phba->sli_rev == LPFC_SLI_REV4) 4504 && pnode) { 4505 /* This IO was aborted by the target, we don't 4506 * know the rxid and because we did not send the 4507 * ABTS we cannot generate and RRQ. 
4508 */ 4509 lpfc_set_rrq_active(phba, pnode, 4510 lpfc_cmd->cur_iocbq.sli4_lxritag, 4511 0, 0); 4512 } 4513 fallthrough; 4514 default: 4515 cmd->result = DID_ERROR << 16; 4516 break; 4517 } 4518 4519 if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) 4520 cmd->result = DID_TRANSPORT_DISRUPTED << 16 | 4521 SAM_STAT_BUSY; 4522 } else 4523 cmd->result = DID_OK << 16; 4524 4525 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) { 4526 uint32_t *lp = (uint32_t *)cmd->sense_buffer; 4527 4528 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 4529 "0710 Iodone <%d/%llu> cmd x%px, error " 4530 "x%x SNS x%x x%x Data: x%x x%x\n", 4531 cmd->device->id, cmd->device->lun, cmd, 4532 cmd->result, *lp, *(lp + 3), cmd->retries, 4533 scsi_get_resid(cmd)); 4534 } 4535 4536 if (vport->cfg_max_scsicmpl_time && 4537 time_after(jiffies, lpfc_cmd->start_time + 4538 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { 4539 spin_lock_irqsave(shost->host_lock, flags); 4540 if (pnode) { 4541 if (pnode->cmd_qdepth > 4542 atomic_read(&pnode->cmd_pending) && 4543 (atomic_read(&pnode->cmd_pending) > 4544 LPFC_MIN_TGT_QDEPTH) && 4545 ((cmd->cmnd[0] == READ_10) || 4546 (cmd->cmnd[0] == WRITE_10))) 4547 pnode->cmd_qdepth = 4548 atomic_read(&pnode->cmd_pending); 4549 4550 pnode->last_change_time = jiffies; 4551 } 4552 spin_unlock_irqrestore(shost->host_lock, flags); 4553 } 4554 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 4555 4556 lpfc_cmd->pCmd = NULL; 4557 spin_unlock(&lpfc_cmd->buf_lock); 4558 4559 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4560 if (lpfc_cmd->ts_cmd_start) { 4561 lpfc_cmd->ts_isr_cmpl = pIocbIn->isr_timestamp; 4562 lpfc_cmd->ts_data_io = ktime_get_ns(); 4563 phba->ktime_last_cmd = lpfc_cmd->ts_data_io; 4564 lpfc_io_ktime(phba, lpfc_cmd); 4565 } 4566 #endif 4567 4568 /* The sdev is not guaranteed to be valid post scsi_done upcall. */ 4569 scsi_done(cmd); 4570 4571 /* 4572 * If there is an abort thread waiting for command completion 4573 * wake up the thread. 4574 */ 4575 spin_lock(&lpfc_cmd->buf_lock); 4576 lpfc_cmd->cur_iocbq.cmd_flag &= ~LPFC_DRIVER_ABORTED; 4577 if (lpfc_cmd->waitq) 4578 wake_up(lpfc_cmd->waitq); 4579 spin_unlock(&lpfc_cmd->buf_lock); 4580 4581 lpfc_release_scsi_buf(phba, lpfc_cmd); 4582 } 4583 4584 /** 4585 * lpfc_scsi_prep_cmnd_buf_s3 - SLI-3 IOCB init for the IO 4586 * @vport: Pointer to vport object. 4587 * @lpfc_cmd: The scsi buffer which is going to be prep'ed. 4588 * @tmo: timeout value for the IO 4589 * 4590 * Based on the data-direction of the command, initialize IOCB 4591 * in the I/O buffer. Fill in the IOCB fields which are independent 4592 * of the scsi buffer 4593 * 4594 * RETURNS 0 - SUCCESS, 4595 **/ 4596 static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport, 4597 struct lpfc_io_buf *lpfc_cmd, 4598 uint8_t tmo) 4599 { 4600 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 4601 struct lpfc_iocbq *piocbq = &lpfc_cmd->cur_iocbq; 4602 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 4603 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 4604 struct lpfc_nodelist *pnode = lpfc_cmd->ndlp; 4605 int datadir = scsi_cmnd->sc_data_direction; 4606 u32 fcpdl; 4607 4608 piocbq->iocb.un.fcpi.fcpi_XRdy = 0; 4609 4610 /* 4611 * There are three possibilities here - use scatter-gather segment, use 4612 * the single mapping, or neither. Start the lpfc command prep by 4613 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first 4614 * data bde entry. 
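 * For example (editor's sketch of the branches below): a WRITE with a
 * scatter-gather list on a node that negotiated NLP_FIRSTBURST is built
 * as CMD_FCP_IWRITE64_CR with fcpi_XRdy set to
 * min(scsi_bufflen(), vport->cfg_first_burst_size), whereas a command
 * with no data (scsi_sg_count() == 0) is built as CMD_FCP_ICMND64_CR.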
4615 */ 4616 if (scsi_sg_count(scsi_cmnd)) { 4617 if (datadir == DMA_TO_DEVICE) { 4618 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; 4619 iocb_cmd->ulpPU = PARM_READ_CHECK; 4620 if (vport->cfg_first_burst_size && 4621 (pnode->nlp_flag & NLP_FIRSTBURST)) { 4622 u32 xrdy_len; 4623 4624 fcpdl = scsi_bufflen(scsi_cmnd); 4625 xrdy_len = min(fcpdl, 4626 vport->cfg_first_burst_size); 4627 piocbq->iocb.un.fcpi.fcpi_XRdy = xrdy_len; 4628 } 4629 fcp_cmnd->fcpCntl3 = WRITE_DATA; 4630 } else { 4631 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; 4632 iocb_cmd->ulpPU = PARM_READ_CHECK; 4633 fcp_cmnd->fcpCntl3 = READ_DATA; 4634 } 4635 } else { 4636 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR; 4637 iocb_cmd->un.fcpi.fcpi_parm = 0; 4638 iocb_cmd->ulpPU = 0; 4639 fcp_cmnd->fcpCntl3 = 0; 4640 } 4641 4642 /* 4643 * Finish initializing those IOCB fields that are independent 4644 * of the scsi_cmnd request_buffer 4645 */ 4646 piocbq->iocb.ulpContext = pnode->nlp_rpi; 4647 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) 4648 piocbq->iocb.ulpFCP2Rcvy = 1; 4649 else 4650 piocbq->iocb.ulpFCP2Rcvy = 0; 4651 4652 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f); 4653 piocbq->io_buf = lpfc_cmd; 4654 if (!piocbq->cmd_cmpl) 4655 piocbq->cmd_cmpl = lpfc_scsi_cmd_iocb_cmpl; 4656 piocbq->iocb.ulpTimeout = tmo; 4657 piocbq->vport = vport; 4658 return 0; 4659 } 4660 4661 /** 4662 * lpfc_scsi_prep_cmnd_buf_s4 - SLI-4 WQE init for the IO 4663 * @vport: Pointer to vport object. 4664 * @lpfc_cmd: The scsi buffer which is going to be prep'ed. 4665 * @tmo: timeout value for the IO 4666 * 4667 * Based on the data-direction of the command copy WQE template 4668 * to I/O buffer WQE. Fill in the WQE fields which are independent 4669 * of the scsi buffer 4670 * 4671 * RETURNS 0 - SUCCESS, 4672 **/ 4673 static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport, 4674 struct lpfc_io_buf *lpfc_cmd, 4675 uint8_t tmo) 4676 { 4677 struct lpfc_hba *phba = vport->phba; 4678 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 4679 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 4680 struct lpfc_sli4_hdw_queue *hdwq = NULL; 4681 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; 4682 struct lpfc_nodelist *pnode = lpfc_cmd->ndlp; 4683 union lpfc_wqe128 *wqe = &pwqeq->wqe; 4684 u16 idx = lpfc_cmd->hdwq_no; 4685 int datadir = scsi_cmnd->sc_data_direction; 4686 4687 hdwq = &phba->sli4_hba.hdwq[idx]; 4688 4689 /* Initialize 64 bytes only */ 4690 memset(wqe, 0, sizeof(union lpfc_wqe128)); 4691 4692 /* 4693 * There are three possibilities here - use scatter-gather segment, use 4694 * the single mapping, or neither. 
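 * Roughly (paraphrasing the branches that follow): DMA_TO_DEVICE with a
 * scatter-gather list copies words 7-11 of the iwrite WQE template,
 * DMA_FROM_DEVICE copies the iread template (forcing IOD to NONE on a
 * CMF managed port), and a command with no data copies words 4-11 of the
 * icmnd template.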
4695 */ 4696 if (scsi_sg_count(scsi_cmnd)) { 4697 if (datadir == DMA_TO_DEVICE) { 4698 /* From the iwrite template, initialize words 7 - 11 */ 4699 memcpy(&wqe->words[7], 4700 &lpfc_iwrite_cmd_template.words[7], 4701 sizeof(uint32_t) * 5); 4702 4703 fcp_cmnd->fcpCntl3 = WRITE_DATA; 4704 if (hdwq) 4705 hdwq->scsi_cstat.output_requests++; 4706 } else { 4707 /* From the iread template, initialize words 7 - 11 */ 4708 memcpy(&wqe->words[7], 4709 &lpfc_iread_cmd_template.words[7], 4710 sizeof(uint32_t) * 5); 4711 4712 /* Word 7 */ 4713 bf_set(wqe_tmo, &wqe->fcp_iread.wqe_com, tmo); 4714 4715 fcp_cmnd->fcpCntl3 = READ_DATA; 4716 if (hdwq) 4717 hdwq->scsi_cstat.input_requests++; 4718 4719 /* For a CMF Managed port, iod must be zero'ed */ 4720 if (phba->cmf_active_mode == LPFC_CFG_MANAGED) 4721 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, 4722 LPFC_WQE_IOD_NONE); 4723 } 4724 } else { 4725 /* From the icmnd template, initialize words 4 - 11 */ 4726 memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4], 4727 sizeof(uint32_t) * 8); 4728 4729 /* Word 7 */ 4730 bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, tmo); 4731 4732 fcp_cmnd->fcpCntl3 = 0; 4733 if (hdwq) 4734 hdwq->scsi_cstat.control_requests++; 4735 } 4736 4737 /* 4738 * Finish initializing those WQE fields that are independent 4739 * of the request_buffer 4740 */ 4741 4742 /* Word 3 */ 4743 bf_set(payload_offset_len, &wqe->fcp_icmd, 4744 sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); 4745 4746 /* Word 6 */ 4747 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 4748 phba->sli4_hba.rpi_ids[pnode->nlp_rpi]); 4749 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag); 4750 4751 /* Word 7*/ 4752 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) 4753 bf_set(wqe_erp, &wqe->generic.wqe_com, 1); 4754 4755 bf_set(wqe_class, &wqe->generic.wqe_com, 4756 (pnode->nlp_fcp_info & 0x0f)); 4757 4758 /* Word 8 */ 4759 wqe->generic.wqe_com.abort_tag = pwqeq->iotag; 4760 4761 /* Word 9 */ 4762 bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag); 4763 4764 pwqeq->vport = vport; 4765 pwqeq->io_buf = lpfc_cmd; 4766 pwqeq->hba_wqidx = lpfc_cmd->hdwq_no; 4767 pwqeq->cmd_cmpl = lpfc_fcp_io_cmd_wqe_cmpl; 4768 4769 return 0; 4770 } 4771 4772 /** 4773 * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit 4774 * @vport: The virtual port for which this call is being executed. 4775 * @lpfc_cmd: The scsi command which needs to send. 4776 * @pnode: Pointer to lpfc_nodelist. 4777 * 4778 * This routine initializes fcp_cmnd and iocb data structure from scsi command 4779 * to transfer for device with SLI3 interface spec. 
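 *
 * For example (editor's note restating the copy below): a 10-byte
 * READ(10) CDB is copied into fcpCdb[] and the remaining
 * LPFC_FCP_CDB_LEN - 10 bytes are zeroed; the task attribute is always
 * set to SIMPLE_Q.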
4780 **/ 4781 static int 4782 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, 4783 struct lpfc_nodelist *pnode) 4784 { 4785 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 4786 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 4787 u8 *ptr; 4788 4789 if (!pnode) 4790 return 0; 4791 4792 lpfc_cmd->fcp_rsp->rspSnsLen = 0; 4793 /* clear task management bits */ 4794 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0; 4795 4796 int_to_scsilun(lpfc_cmd->pCmd->device->lun, 4797 &lpfc_cmd->fcp_cmnd->fcp_lun); 4798 4799 ptr = &fcp_cmnd->fcpCdb[0]; 4800 memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len); 4801 if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) { 4802 ptr += scsi_cmnd->cmd_len; 4803 memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len)); 4804 } 4805 4806 fcp_cmnd->fcpCntl1 = SIMPLE_Q; 4807 4808 lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, lpfc_cmd->timeout); 4809 4810 return 0; 4811 } 4812 4813 /** 4814 * lpfc_scsi_prep_task_mgmt_cmd_s3 - Convert SLI3 scsi TM cmd to FCP info unit 4815 * @vport: The virtual port for which this call is being executed. 4816 * @lpfc_cmd: Pointer to lpfc_io_buf data structure. 4817 * @lun: Logical unit number. 4818 * @task_mgmt_cmd: SCSI task management command. 4819 * 4820 * This routine creates FCP information unit corresponding to @task_mgmt_cmd 4821 * for device with SLI-3 interface spec. 4822 * 4823 * Return codes: 4824 * 0 - Error 4825 * 1 - Success 4826 **/ 4827 static int 4828 lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport, 4829 struct lpfc_io_buf *lpfc_cmd, 4830 u64 lun, u8 task_mgmt_cmd) 4831 { 4832 struct lpfc_iocbq *piocbq; 4833 IOCB_t *piocb; 4834 struct fcp_cmnd *fcp_cmnd; 4835 struct lpfc_rport_data *rdata = lpfc_cmd->rdata; 4836 struct lpfc_nodelist *ndlp = rdata->pnode; 4837 4838 if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE) 4839 return 0; 4840 4841 piocbq = &(lpfc_cmd->cur_iocbq); 4842 piocbq->vport = vport; 4843 4844 piocb = &piocbq->iocb; 4845 4846 fcp_cmnd = lpfc_cmd->fcp_cmnd; 4847 /* Clear out any old data in the FCP command area */ 4848 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); 4849 int_to_scsilun(lun, &fcp_cmnd->fcp_lun); 4850 fcp_cmnd->fcpCntl2 = task_mgmt_cmd; 4851 if (!(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED)) 4852 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd); 4853 piocb->ulpCommand = CMD_FCP_ICMND64_CR; 4854 piocb->ulpContext = ndlp->nlp_rpi; 4855 piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0; 4856 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f); 4857 piocb->ulpPU = 0; 4858 piocb->un.fcpi.fcpi_parm = 0; 4859 4860 /* ulpTimeout is only one byte */ 4861 if (lpfc_cmd->timeout > 0xff) { 4862 /* 4863 * Do not timeout the command at the firmware level. 4864 * The driver will provide the timeout mechanism. 4865 */ 4866 piocb->ulpTimeout = 0; 4867 } else 4868 piocb->ulpTimeout = lpfc_cmd->timeout; 4869 4870 return 1; 4871 } 4872 4873 /** 4874 * lpfc_scsi_prep_task_mgmt_cmd_s4 - Convert SLI4 scsi TM cmd to FCP info unit 4875 * @vport: The virtual port for which this call is being executed. 4876 * @lpfc_cmd: Pointer to lpfc_io_buf data structure. 4877 * @lun: Logical unit number. 4878 * @task_mgmt_cmd: SCSI task management command. 4879 * 4880 * This routine creates FCP information unit corresponding to @task_mgmt_cmd 4881 * for device with SLI-4 interface spec. 
4882 * 4883 * Return codes: 4884 * 0 - Error 4885 * 1 - Success 4886 **/ 4887 static int 4888 lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport, 4889 struct lpfc_io_buf *lpfc_cmd, 4890 u64 lun, u8 task_mgmt_cmd) 4891 { 4892 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; 4893 union lpfc_wqe128 *wqe = &pwqeq->wqe; 4894 struct fcp_cmnd *fcp_cmnd; 4895 struct lpfc_rport_data *rdata = lpfc_cmd->rdata; 4896 struct lpfc_nodelist *ndlp = rdata->pnode; 4897 4898 if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE) 4899 return 0; 4900 4901 pwqeq->vport = vport; 4902 /* Initialize 64 bytes only */ 4903 memset(wqe, 0, sizeof(union lpfc_wqe128)); 4904 4905 /* From the icmnd template, initialize words 4 - 11 */ 4906 memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4], 4907 sizeof(uint32_t) * 8); 4908 4909 fcp_cmnd = lpfc_cmd->fcp_cmnd; 4910 /* Clear out any old data in the FCP command area */ 4911 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); 4912 int_to_scsilun(lun, &fcp_cmnd->fcp_lun); 4913 fcp_cmnd->fcpCntl3 = 0; 4914 fcp_cmnd->fcpCntl2 = task_mgmt_cmd; 4915 4916 bf_set(payload_offset_len, &wqe->fcp_icmd, 4917 sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); 4918 bf_set(cmd_buff_len, &wqe->fcp_icmd, 0); 4919 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, /* ulpContext */ 4920 vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 4921 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, 4922 ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0)); 4923 bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, 4924 (ndlp->nlp_fcp_info & 0x0f)); 4925 4926 /* ulpTimeout is only one byte */ 4927 if (lpfc_cmd->timeout > 0xff) { 4928 /* 4929 * Do not timeout the command at the firmware level. 4930 * The driver will provide the timeout mechanism. 4931 */ 4932 bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, 0); 4933 } else { 4934 bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, lpfc_cmd->timeout); 4935 } 4936 4937 lpfc_prep_embed_io(vport->phba, lpfc_cmd); 4938 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag); 4939 wqe->generic.wqe_com.abort_tag = pwqeq->iotag; 4940 bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag); 4941 4942 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd); 4943 4944 return 1; 4945 } 4946 4947 /** 4948 * lpfc_scsi_api_table_setup - Set up scsi api function jump table 4949 * @phba: The hba struct for which this call is being executed. 4950 * @dev_grp: The HBA PCI-Device group number. 4951 * 4952 * This routine sets up the SCSI interface API function jump table in @phba 4953 * struct. 4954 * Returns: 0 - success, -ENODEV - failure. 
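 *
 * Hypothetical dispatch sketch (editor's illustration; the real call
 * sites live elsewhere in the driver): once the table is populated,
 * a call such as
 *
 *	phba->lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, tmo);
 *
 * resolves to lpfc_scsi_prep_cmnd_buf_s3() on LPFC_PCI_DEV_LP hardware
 * and to lpfc_scsi_prep_cmnd_buf_s4() on LPFC_PCI_DEV_OC hardware.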
4955 **/ 4956 int 4957 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 4958 { 4959 4960 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf; 4961 4962 switch (dev_grp) { 4963 case LPFC_PCI_DEV_LP: 4964 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3; 4965 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3; 4966 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3; 4967 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3; 4968 phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s3; 4969 phba->lpfc_scsi_prep_task_mgmt_cmd = 4970 lpfc_scsi_prep_task_mgmt_cmd_s3; 4971 break; 4972 case LPFC_PCI_DEV_OC: 4973 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4; 4974 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4; 4975 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4; 4976 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4; 4977 phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4; 4978 phba->lpfc_scsi_prep_task_mgmt_cmd = 4979 lpfc_scsi_prep_task_mgmt_cmd_s4; 4980 break; 4981 default: 4982 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4983 "1418 Invalid HBA PCI-device group: 0x%x\n", 4984 dev_grp); 4985 return -ENODEV; 4986 } 4987 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth; 4988 return 0; 4989 } 4990 4991 /** 4992 * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command 4993 * @phba: The Hba for which this call is being executed. 4994 * @cmdiocbq: Pointer to lpfc_iocbq data structure. 4995 * @rspiocbq: Pointer to lpfc_iocbq data structure. 4996 * 4997 * This routine is IOCB completion routine for device reset and target reset 4998 * routine. This routine release scsi buffer associated with lpfc_cmd. 4999 **/ 5000 static void 5001 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba, 5002 struct lpfc_iocbq *cmdiocbq, 5003 struct lpfc_iocbq *rspiocbq) 5004 { 5005 struct lpfc_io_buf *lpfc_cmd = cmdiocbq->io_buf; 5006 if (lpfc_cmd) 5007 lpfc_release_scsi_buf(phba, lpfc_cmd); 5008 return; 5009 } 5010 5011 /** 5012 * lpfc_check_pci_resettable - Walks list of devices on pci_dev's bus to check 5013 * if issuing a pci_bus_reset is possibly unsafe 5014 * @phba: lpfc_hba pointer. 5015 * 5016 * Description: 5017 * Walks the bus_list to ensure only PCI devices with Emulex 5018 * vendor id, device ids that support hot reset, and only one occurrence 5019 * of function 0. 
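 * For instance, a bus that also holds a non-Emulex function, or that
 * carries more than one function-0 device, fails the check (see the
 * -EBADSLT returns below).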
5020 * 5021 * Returns: 5022 * -EBADSLT, detected invalid device 5023 * 0, successful 5024 */ 5025 int 5026 lpfc_check_pci_resettable(struct lpfc_hba *phba) 5027 { 5028 const struct pci_dev *pdev = phba->pcidev; 5029 struct pci_dev *ptr = NULL; 5030 u8 counter = 0; 5031 5032 /* Walk the list of devices on the pci_dev's bus */ 5033 list_for_each_entry(ptr, &pdev->bus->devices, bus_list) { 5034 /* Check for Emulex Vendor ID */ 5035 if (ptr->vendor != PCI_VENDOR_ID_EMULEX) { 5036 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5037 "8346 Non-Emulex vendor found: " 5038 "0x%04x\n", ptr->vendor); 5039 return -EBADSLT; 5040 } 5041 5042 /* Check for valid Emulex Device ID */ 5043 if (phba->sli_rev != LPFC_SLI_REV4 || 5044 phba->hba_flag & HBA_FCOE_MODE) { 5045 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5046 "8347 Incapable PCI reset device: " 5047 "0x%04x\n", ptr->device); 5048 return -EBADSLT; 5049 } 5050 5051 /* Check for only one function 0 ID to ensure only one HBA on 5052 * secondary bus 5053 */ 5054 if (ptr->devfn == 0) { 5055 if (++counter > 1) { 5056 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5057 "8348 More than one device on " 5058 "secondary bus found\n"); 5059 return -EBADSLT; 5060 } 5061 } 5062 } 5063 5064 return 0; 5065 } 5066 5067 /** 5068 * lpfc_info - Info entry point of scsi_host_template data structure 5069 * @host: The scsi host for which this call is being executed. 5070 * 5071 * This routine provides module information about hba. 5072 * 5073 * Reutrn code: 5074 * Pointer to char - Success. 5075 **/ 5076 const char * 5077 lpfc_info(struct Scsi_Host *host) 5078 { 5079 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata; 5080 struct lpfc_hba *phba = vport->phba; 5081 int link_speed = 0; 5082 static char lpfcinfobuf[384]; 5083 char tmp[384] = {0}; 5084 5085 memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf)); 5086 if (phba && phba->pcidev){ 5087 /* Model Description */ 5088 scnprintf(tmp, sizeof(tmp), phba->ModelDesc); 5089 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >= 5090 sizeof(lpfcinfobuf)) 5091 goto buffer_done; 5092 5093 /* PCI Info */ 5094 scnprintf(tmp, sizeof(tmp), 5095 " on PCI bus %02x device %02x irq %d", 5096 phba->pcidev->bus->number, phba->pcidev->devfn, 5097 phba->pcidev->irq); 5098 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >= 5099 sizeof(lpfcinfobuf)) 5100 goto buffer_done; 5101 5102 /* Port Number */ 5103 if (phba->Port[0]) { 5104 scnprintf(tmp, sizeof(tmp), " port %s", phba->Port); 5105 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >= 5106 sizeof(lpfcinfobuf)) 5107 goto buffer_done; 5108 } 5109 5110 /* Link Speed */ 5111 link_speed = lpfc_sli_port_speed_get(phba); 5112 if (link_speed != 0) { 5113 scnprintf(tmp, sizeof(tmp), 5114 " Logical Link Speed: %d Mbps", link_speed); 5115 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >= 5116 sizeof(lpfcinfobuf)) 5117 goto buffer_done; 5118 } 5119 5120 /* PCI resettable */ 5121 if (!lpfc_check_pci_resettable(phba)) { 5122 scnprintf(tmp, sizeof(tmp), " PCI resettable"); 5123 strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)); 5124 } 5125 } 5126 5127 buffer_done: 5128 return lpfcinfobuf; 5129 } 5130 5131 /** 5132 * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba 5133 * @phba: The Hba for which this call is being executed. 5134 * 5135 * This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo. 5136 * The default value of cfg_poll_tmo is 10 milliseconds. 
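 *
 * Concretely (editor's restatement of the code below): with the default
 * cfg_poll_tmo of 10, the timer is pushed out to
 * jiffies + msecs_to_jiffies(10), and only while the SLI-3 FCP ring's
 * txcmplq is non-empty; with an empty completion queue the timer is left
 * untouched.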
5137 **/ 5138 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba) 5139 { 5140 unsigned long poll_tmo_expires = 5141 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo)); 5142 5143 if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq)) 5144 mod_timer(&phba->fcp_poll_timer, 5145 poll_tmo_expires); 5146 } 5147 5148 /** 5149 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA 5150 * @phba: The Hba for which this call is being executed. 5151 * 5152 * This routine starts the fcp_poll_timer of @phba. 5153 **/ 5154 void lpfc_poll_start_timer(struct lpfc_hba * phba) 5155 { 5156 lpfc_poll_rearm_timer(phba); 5157 } 5158 5159 /** 5160 * lpfc_poll_timeout - Restart polling timer 5161 * @t: Timer construct where lpfc_hba data structure pointer is obtained. 5162 * 5163 * This routine restarts fcp_poll timer, when FCP ring polling is enable 5164 * and FCP Ring interrupt is disable. 5165 **/ 5166 void lpfc_poll_timeout(struct timer_list *t) 5167 { 5168 struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer); 5169 5170 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 5171 lpfc_sli_handle_fast_ring_event(phba, 5172 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); 5173 5174 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 5175 lpfc_poll_rearm_timer(phba); 5176 } 5177 } 5178 5179 /* 5180 * lpfc_is_command_vm_io - get the UUID from blk cgroup 5181 * @cmd: Pointer to scsi_cmnd data structure 5182 * Returns UUID if present, otherwise NULL 5183 */ 5184 static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd) 5185 { 5186 struct bio *bio = scsi_cmd_to_rq(cmd)->bio; 5187 5188 if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !bio) 5189 return NULL; 5190 return blkcg_get_fc_appid(bio); 5191 } 5192 5193 /** 5194 * lpfc_queuecommand - scsi_host_template queuecommand entry point 5195 * @shost: kernel scsi host pointer. 5196 * @cmnd: Pointer to scsi_cmnd data structure. 5197 * 5198 * Driver registers this routine to scsi midlayer to submit a @cmd to process. 5199 * This routine prepares an IOCB from scsi command and provides to firmware. 5200 * The @done callback is invoked after driver finished processing the command. 5201 * 5202 * Return value : 5203 * 0 - Success 5204 * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily. 
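 * SCSI_MLQUEUE_TARGET_BUSY - Block this target temporarily.
 *
 * For example (editor's summary of the exit paths below): an empty driver
 * buffer pool returns SCSI_MLQUEUE_HOST_BUSY, a target whose pending
 * command count has reached its cmd_qdepth returns
 * SCSI_MLQUEUE_TARGET_BUSY, and a command rejected up front (e.g. a
 * protected command without BlockGuard registered) is completed
 * immediately through scsi_done() and returns 0.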
5205 **/ 5206 static int 5207 lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) 5208 { 5209 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 5210 struct lpfc_hba *phba = vport->phba; 5211 struct lpfc_iocbq *cur_iocbq = NULL; 5212 struct lpfc_rport_data *rdata; 5213 struct lpfc_nodelist *ndlp; 5214 struct lpfc_io_buf *lpfc_cmd; 5215 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 5216 int err, idx; 5217 u8 *uuid = NULL; 5218 uint64_t start; 5219 5220 start = ktime_get_ns(); 5221 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 5222 5223 /* sanity check on references */ 5224 if (unlikely(!rdata) || unlikely(!rport)) 5225 goto out_fail_command; 5226 5227 err = fc_remote_port_chkready(rport); 5228 if (err) { 5229 cmnd->result = err; 5230 goto out_fail_command; 5231 } 5232 ndlp = rdata->pnode; 5233 5234 if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) && 5235 (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) { 5236 5237 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5238 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x" 5239 " op:%02x str=%s without registering for" 5240 " BlockGuard - Rejecting command\n", 5241 cmnd->cmnd[0], scsi_get_prot_op(cmnd), 5242 dif_op_str[scsi_get_prot_op(cmnd)]); 5243 goto out_fail_command; 5244 } 5245 5246 /* 5247 * Catch race where our node has transitioned, but the 5248 * transport is still transitioning. 5249 */ 5250 if (!ndlp) 5251 goto out_tgt_busy1; 5252 5253 /* Check if IO qualifies for CMF */ 5254 if (phba->cmf_active_mode != LPFC_CFG_OFF && 5255 cmnd->sc_data_direction == DMA_FROM_DEVICE && 5256 (scsi_sg_count(cmnd))) { 5257 /* Latency start time saved in rx_cmd_start later in routine */ 5258 err = lpfc_update_cmf_cmd(phba, scsi_bufflen(cmnd)); 5259 if (err) 5260 goto out_tgt_busy1; 5261 } 5262 5263 if (lpfc_ndlp_check_qdepth(phba, ndlp)) { 5264 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) { 5265 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR, 5266 "3377 Target Queue Full, scsi Id:%d " 5267 "Qdepth:%d Pending command:%d" 5268 " WWNN:%02x:%02x:%02x:%02x:" 5269 "%02x:%02x:%02x:%02x, " 5270 " WWPN:%02x:%02x:%02x:%02x:" 5271 "%02x:%02x:%02x:%02x", 5272 ndlp->nlp_sid, ndlp->cmd_qdepth, 5273 atomic_read(&ndlp->cmd_pending), 5274 ndlp->nlp_nodename.u.wwn[0], 5275 ndlp->nlp_nodename.u.wwn[1], 5276 ndlp->nlp_nodename.u.wwn[2], 5277 ndlp->nlp_nodename.u.wwn[3], 5278 ndlp->nlp_nodename.u.wwn[4], 5279 ndlp->nlp_nodename.u.wwn[5], 5280 ndlp->nlp_nodename.u.wwn[6], 5281 ndlp->nlp_nodename.u.wwn[7], 5282 ndlp->nlp_portname.u.wwn[0], 5283 ndlp->nlp_portname.u.wwn[1], 5284 ndlp->nlp_portname.u.wwn[2], 5285 ndlp->nlp_portname.u.wwn[3], 5286 ndlp->nlp_portname.u.wwn[4], 5287 ndlp->nlp_portname.u.wwn[5], 5288 ndlp->nlp_portname.u.wwn[6], 5289 ndlp->nlp_portname.u.wwn[7]); 5290 goto out_tgt_busy2; 5291 } 5292 } 5293 5294 lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd); 5295 if (lpfc_cmd == NULL) { 5296 lpfc_rampdown_queue_depth(phba); 5297 5298 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR, 5299 "0707 driver's buffer pool is empty, " 5300 "IO busied\n"); 5301 goto out_host_busy; 5302 } 5303 lpfc_cmd->rx_cmd_start = start; 5304 5305 cur_iocbq = &lpfc_cmd->cur_iocbq; 5306 /* 5307 * Store the midlayer's command structure for the completion phase 5308 * and complete the command initialization. 
5309 */ 5310 lpfc_cmd->pCmd = cmnd; 5311 lpfc_cmd->rdata = rdata; 5312 lpfc_cmd->ndlp = ndlp; 5313 cur_iocbq->cmd_cmpl = NULL; 5314 cmnd->host_scribble = (unsigned char *)lpfc_cmd; 5315 5316 err = lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); 5317 if (err) 5318 goto out_host_busy_release_buf; 5319 5320 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { 5321 if (vport->phba->cfg_enable_bg) { 5322 lpfc_printf_vlog(vport, 5323 KERN_INFO, LOG_SCSI_CMD, 5324 "9033 BLKGRD: rcvd %s cmd:x%x " 5325 "reftag x%x cnt %u pt %x\n", 5326 dif_op_str[scsi_get_prot_op(cmnd)], 5327 cmnd->cmnd[0], 5328 scsi_prot_ref_tag(cmnd), 5329 scsi_logical_block_count(cmnd), 5330 (cmnd->cmnd[1]>>5)); 5331 } 5332 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 5333 } else { 5334 if (vport->phba->cfg_enable_bg) { 5335 lpfc_printf_vlog(vport, 5336 KERN_INFO, LOG_SCSI_CMD, 5337 "9038 BLKGRD: rcvd PROT_NORMAL cmd: " 5338 "x%x reftag x%x cnt %u pt %x\n", 5339 cmnd->cmnd[0], 5340 scsi_prot_ref_tag(cmnd), 5341 scsi_logical_block_count(cmnd), 5342 (cmnd->cmnd[1]>>5)); 5343 } 5344 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); 5345 } 5346 5347 if (unlikely(err)) { 5348 if (err == 2) { 5349 cmnd->result = DID_ERROR << 16; 5350 goto out_fail_command_release_buf; 5351 } 5352 goto out_host_busy_free_buf; 5353 } 5354 5355 /* check the necessary and sufficient condition to support VMID */ 5356 if (lpfc_is_vmid_enabled(phba) && 5357 (ndlp->vmid_support || 5358 phba->pport->vmid_priority_tagging == 5359 LPFC_VMID_PRIO_TAG_ALL_TARGETS)) { 5360 /* is the I/O generated by a VM, get the associated virtual */ 5361 /* entity id */ 5362 uuid = lpfc_is_command_vm_io(cmnd); 5363 5364 if (uuid) { 5365 err = lpfc_vmid_get_appid(vport, uuid, 5366 cmnd->sc_data_direction, 5367 (union lpfc_vmid_io_tag *) 5368 &cur_iocbq->vmid_tag); 5369 if (!err) 5370 cur_iocbq->cmd_flag |= LPFC_IO_VMID; 5371 } 5372 } 5373 5374 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 5375 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) 5376 this_cpu_inc(phba->sli4_hba.c_stat->xmt_io); 5377 #endif 5378 /* Issue I/O to adapter */ 5379 err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING, cur_iocbq, 5380 SLI_IOCB_RET_IOCB); 5381 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 5382 if (start) { 5383 lpfc_cmd->ts_cmd_start = start; 5384 lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd; 5385 lpfc_cmd->ts_cmd_wqput = ktime_get_ns(); 5386 } else { 5387 lpfc_cmd->ts_cmd_start = 0; 5388 } 5389 #endif 5390 if (err) { 5391 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5392 "3376 FCP could not issue iocb err %x " 5393 "FCP cmd x%x <%d/%llu> " 5394 "sid: x%x did: x%x oxid: x%x " 5395 "Data: x%x x%x x%x x%x\n", 5396 err, cmnd->cmnd[0], 5397 cmnd->device ? cmnd->device->id : 0xffff, 5398 cmnd->device ? cmnd->device->lun : (u64)-1, 5399 vport->fc_myDID, ndlp->nlp_DID, 5400 phba->sli_rev == LPFC_SLI_REV4 ? 5401 cur_iocbq->sli4_xritag : 0xffff, 5402 phba->sli_rev == LPFC_SLI_REV4 ? 5403 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] : 5404 cur_iocbq->iocb.ulpContext, 5405 cur_iocbq->iotag, 5406 phba->sli_rev == LPFC_SLI_REV4 ? 
5407 bf_get(wqe_tmo, 5408 &cur_iocbq->wqe.generic.wqe_com) : 5409 cur_iocbq->iocb.ulpTimeout, 5410 (uint32_t)(scsi_cmd_to_rq(cmnd)->timeout / 1000)); 5411 5412 goto out_host_busy_free_buf; 5413 } 5414 5415 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 5416 lpfc_sli_handle_fast_ring_event(phba, 5417 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); 5418 5419 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 5420 lpfc_poll_rearm_timer(phba); 5421 } 5422 5423 if (phba->cfg_xri_rebalancing) 5424 lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no); 5425 5426 return 0; 5427 5428 out_host_busy_free_buf: 5429 idx = lpfc_cmd->hdwq_no; 5430 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 5431 if (phba->sli4_hba.hdwq) { 5432 switch (lpfc_cmd->fcp_cmnd->fcpCntl3) { 5433 case WRITE_DATA: 5434 phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--; 5435 break; 5436 case READ_DATA: 5437 phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--; 5438 break; 5439 default: 5440 phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--; 5441 } 5442 } 5443 out_host_busy_release_buf: 5444 lpfc_release_scsi_buf(phba, lpfc_cmd); 5445 out_host_busy: 5446 lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd), 5447 shost); 5448 return SCSI_MLQUEUE_HOST_BUSY; 5449 5450 out_tgt_busy2: 5451 lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd), 5452 shost); 5453 out_tgt_busy1: 5454 return SCSI_MLQUEUE_TARGET_BUSY; 5455 5456 out_fail_command_release_buf: 5457 lpfc_release_scsi_buf(phba, lpfc_cmd); 5458 lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd), 5459 shost); 5460 5461 out_fail_command: 5462 scsi_done(cmnd); 5463 return 0; 5464 } 5465 5466 /* 5467 * lpfc_vmid_vport_cleanup - cleans up the resources associated with a vport 5468 * @vport: The virtual port for which this call is being executed. 5469 */ 5470 void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport) 5471 { 5472 u32 bucket; 5473 struct lpfc_vmid *cur; 5474 5475 if (vport->port_type == LPFC_PHYSICAL_PORT) 5476 del_timer_sync(&vport->phba->inactive_vmid_poll); 5477 5478 kfree(vport->qfpa_res); 5479 kfree(vport->vmid_priority.vmid_range); 5480 kfree(vport->vmid); 5481 5482 if (!hash_empty(vport->hash_table)) 5483 hash_for_each(vport->hash_table, bucket, cur, hnode) 5484 hash_del(&cur->hnode); 5485 5486 vport->qfpa_res = NULL; 5487 vport->vmid_priority.vmid_range = NULL; 5488 vport->vmid = NULL; 5489 vport->cur_vmid_cnt = 0; 5490 } 5491 5492 /** 5493 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point 5494 * @cmnd: Pointer to scsi_cmnd data structure. 5495 * 5496 * This routine aborts @cmnd pending in base driver. 
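 * The routine waits up to 2 * cfg_devloss_tmo seconds for the aborted
 * command to complete before reporting failure; the 0x2002/0x2003 codes
 * listed below are the midlayer's SUCCESS and FAILED values.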
5497 * 5498 * Return code : 5499 * 0x2003 - Error 5500 * 0x2002 - Success 5501 **/ 5502 static int 5503 lpfc_abort_handler(struct scsi_cmnd *cmnd) 5504 { 5505 struct Scsi_Host *shost = cmnd->device->host; 5506 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 5507 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 5508 struct lpfc_hba *phba = vport->phba; 5509 struct lpfc_iocbq *iocb; 5510 struct lpfc_io_buf *lpfc_cmd; 5511 int ret = SUCCESS, status = 0; 5512 struct lpfc_sli_ring *pring_s4 = NULL; 5513 struct lpfc_sli_ring *pring = NULL; 5514 int ret_val; 5515 unsigned long flags; 5516 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); 5517 5518 status = fc_block_rport(rport); 5519 if (status != 0 && status != SUCCESS) 5520 return status; 5521 5522 lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble; 5523 if (!lpfc_cmd) 5524 return ret; 5525 5526 /* Guard against IO completion being called at same time */ 5527 spin_lock_irqsave(&lpfc_cmd->buf_lock, flags); 5528 5529 spin_lock(&phba->hbalock); 5530 /* driver queued commands are in process of being flushed */ 5531 if (phba->hba_flag & HBA_IOQ_FLUSH) { 5532 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5533 "3168 SCSI Layer abort requested I/O has been " 5534 "flushed by LLD.\n"); 5535 ret = FAILED; 5536 goto out_unlock_hba; 5537 } 5538 5539 if (!lpfc_cmd->pCmd) { 5540 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5541 "2873 SCSI Layer I/O Abort Request IO CMPL Status " 5542 "x%x ID %d LUN %llu\n", 5543 SUCCESS, cmnd->device->id, cmnd->device->lun); 5544 goto out_unlock_hba; 5545 } 5546 5547 iocb = &lpfc_cmd->cur_iocbq; 5548 if (phba->sli_rev == LPFC_SLI_REV4) { 5549 pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring; 5550 if (!pring_s4) { 5551 ret = FAILED; 5552 goto out_unlock_hba; 5553 } 5554 spin_lock(&pring_s4->ring_lock); 5555 } 5556 /* the command is in process of being cancelled */ 5557 if (!(iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ)) { 5558 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5559 "3169 SCSI Layer abort requested I/O has been " 5560 "cancelled by LLD.\n"); 5561 ret = FAILED; 5562 goto out_unlock_ring; 5563 } 5564 /* 5565 * If pCmd field of the corresponding lpfc_io_buf structure 5566 * points to a different SCSI command, then the driver has 5567 * already completed this command, but the midlayer did not 5568 * see the completion before the eh fired. Just return SUCCESS. 
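 * The command state is checked under lpfc_cmd->buf_lock, phba->hbalock and, for SLI4,
 * the ring lock; the abort is then issued and the routine waits up to 2 * devloss_tmo
 * for the aborted I/O to complete before reporting status.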
5569 */ 5570 if (lpfc_cmd->pCmd != cmnd) { 5571 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5572 "3170 SCSI Layer abort requested I/O has been " 5573 "completed by LLD.\n"); 5574 goto out_unlock_ring; 5575 } 5576 5577 WARN_ON(iocb->io_buf != lpfc_cmd); 5578 5579 /* abort issued in recovery is still in progress */ 5580 if (iocb->cmd_flag & LPFC_DRIVER_ABORTED) { 5581 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5582 "3389 SCSI Layer I/O Abort Request is pending\n"); 5583 if (phba->sli_rev == LPFC_SLI_REV4) 5584 spin_unlock(&pring_s4->ring_lock); 5585 spin_unlock(&phba->hbalock); 5586 spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags); 5587 goto wait_for_cmpl; 5588 } 5589 5590 lpfc_cmd->waitq = &waitq; 5591 if (phba->sli_rev == LPFC_SLI_REV4) { 5592 spin_unlock(&pring_s4->ring_lock); 5593 ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb, 5594 lpfc_sli_abort_fcp_cmpl); 5595 } else { 5596 pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; 5597 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb, 5598 lpfc_sli_abort_fcp_cmpl); 5599 } 5600 5601 /* Make sure HBA is alive */ 5602 lpfc_issue_hb_tmo(phba); 5603 5604 if (ret_val != IOCB_SUCCESS) { 5605 /* Indicate the IO is not being aborted by the driver. */ 5606 lpfc_cmd->waitq = NULL; 5607 ret = FAILED; 5608 goto out_unlock_hba; 5609 } 5610 5611 /* no longer need the lock after this point */ 5612 spin_unlock(&phba->hbalock); 5613 spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags); 5614 5615 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 5616 lpfc_sli_handle_fast_ring_event(phba, 5617 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); 5618 5619 wait_for_cmpl: 5620 /* 5621 * cmd_flag is set to LPFC_DRIVER_ABORTED before we wait 5622 * for abort to complete. 5623 */ 5624 wait_event_timeout(waitq, 5625 (lpfc_cmd->pCmd != cmnd), 5626 msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000)); 5627 5628 spin_lock(&lpfc_cmd->buf_lock); 5629 5630 if (lpfc_cmd->pCmd == cmnd) { 5631 ret = FAILED; 5632 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5633 "0748 abort handler timed out waiting " 5634 "for aborting I/O (xri:x%x) to complete: " 5635 "ret %#x, ID %d, LUN %llu\n", 5636 iocb->sli4_xritag, ret, 5637 cmnd->device->id, cmnd->device->lun); 5638 } 5639 5640 lpfc_cmd->waitq = NULL; 5641 5642 spin_unlock(&lpfc_cmd->buf_lock); 5643 goto out; 5644 5645 out_unlock_ring: 5646 if (phba->sli_rev == LPFC_SLI_REV4) 5647 spin_unlock(&pring_s4->ring_lock); 5648 out_unlock_hba: 5649 spin_unlock(&phba->hbalock); 5650 spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags); 5651 out: 5652 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5653 "0749 SCSI Layer I/O Abort Request Status x%x ID %d " 5654 "LUN %llu\n", ret, cmnd->device->id, 5655 cmnd->device->lun); 5656 return ret; 5657 } 5658 5659 static char * 5660 lpfc_taskmgmt_name(uint8_t task_mgmt_cmd) 5661 { 5662 switch (task_mgmt_cmd) { 5663 case FCP_ABORT_TASK_SET: 5664 return "ABORT_TASK_SET"; 5665 case FCP_CLEAR_TASK_SET: 5666 return "FCP_CLEAR_TASK_SET"; 5667 case FCP_BUS_RESET: 5668 return "FCP_BUS_RESET"; 5669 case FCP_LUN_RESET: 5670 return "FCP_LUN_RESET"; 5671 case FCP_TARGET_RESET: 5672 return "FCP_TARGET_RESET"; 5673 case FCP_CLEAR_ACA: 5674 return "FCP_CLEAR_ACA"; 5675 case FCP_TERMINATE_TASK: 5676 return "FCP_TERMINATE_TASK"; 5677 default: 5678 return "unknown"; 5679 } 5680 } 5681 5682 5683 /** 5684 * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed 5685 * @vport: The virtual port for which this call is being executed. 5686 * @lpfc_cmd: Pointer to lpfc_io_buf data structure. 
5687 *
5688 * This routine checks the FCP RSP INFO to see if the task mgmt command succeeded
5689 *
5690 * Return code :
5691 * 0x2003 - Error
5692 * 0x2002 - Success
5693 **/
5694 static int
5695 lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
5696 {
5697 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
5698 uint32_t rsp_info;
5699 uint32_t rsp_len;
5700 uint8_t rsp_info_code;
5701 int ret = FAILED;
5702
5703
5704 if (fcprsp == NULL)
5705 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5706 "0703 fcp_rsp is missing\n");
5707 else {
5708 rsp_info = fcprsp->rspStatus2;
5709 rsp_len = be32_to_cpu(fcprsp->rspRspLen);
5710 rsp_info_code = fcprsp->rspInfo3;
5711
5712
5713 lpfc_printf_vlog(vport, KERN_INFO,
5714 LOG_FCP,
5715 "0706 fcp_rsp valid 0x%x,"
5716 " rsp len=%d code 0x%x\n",
5717 rsp_info,
5718 rsp_len, rsp_info_code);
5719
5720 /* If FCP_RSP_LEN_VALID bit is one, then the FCP_RSP_LEN
5721 * field specifies the number of valid bytes of FCP_RSP_INFO.
5722 * The FCP_RSP_LEN field shall be set to 0x04 or 0x08
5723 */
5724 if ((fcprsp->rspStatus2 & RSP_LEN_VALID) &&
5725 ((rsp_len == 8) || (rsp_len == 4))) {
5726 switch (rsp_info_code) {
5727 case RSP_NO_FAILURE:
5728 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5729 "0715 Task Mgmt No Failure\n");
5730 ret = SUCCESS;
5731 break;
5732 case RSP_TM_NOT_SUPPORTED: /* TM rejected */
5733 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5734 "0716 Task Mgmt Target "
5735 "reject\n");
5736 break;
5737 case RSP_TM_NOT_COMPLETED: /* TM failed */
5738 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5739 "0717 Task Mgmt Target "
5740 "failed TM\n");
5741 break;
5742 case RSP_TM_INVALID_LU: /* TM to invalid LU! */
5743 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5744 "0718 Task Mgmt to invalid "
5745 "LUN\n");
5746 break;
5747 }
5748 }
5749 }
5750 return ret;
5751 }
5752
5753
5754 /**
5755 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
5756 * @vport: The virtual port for which this call is being executed.
5757 * @rport: Pointer to remote port
5758 * @tgt_id: Target ID of remote device.
5759 * @lun_id: Lun number for the TMF
5760 * @task_mgmt_cmd: type of TMF to send
5761 *
5762 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
5763 * a remote port.
5764 *
5765 * Return Code:
5766 * 0x2003 - Error
5767 * 0x2002 - Success.
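 * Note: the TMF is issued synchronously via lpfc_sli_issue_iocb_wait() with a timeout of
 * phba->cfg_task_mgmt_tmo; on IOCB_TIMEDOUT the scsi buffer is not released here.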
5768 **/ 5769 static int 5770 lpfc_send_taskmgmt(struct lpfc_vport *vport, struct fc_rport *rport, 5771 unsigned int tgt_id, uint64_t lun_id, 5772 uint8_t task_mgmt_cmd) 5773 { 5774 struct lpfc_hba *phba = vport->phba; 5775 struct lpfc_io_buf *lpfc_cmd; 5776 struct lpfc_iocbq *iocbq; 5777 struct lpfc_iocbq *iocbqrsp; 5778 struct lpfc_rport_data *rdata; 5779 struct lpfc_nodelist *pnode; 5780 int ret; 5781 int status; 5782 5783 rdata = rport->dd_data; 5784 if (!rdata || !rdata->pnode) 5785 return FAILED; 5786 pnode = rdata->pnode; 5787 5788 lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode, NULL); 5789 if (lpfc_cmd == NULL) 5790 return FAILED; 5791 lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo; 5792 lpfc_cmd->rdata = rdata; 5793 lpfc_cmd->pCmd = NULL; 5794 lpfc_cmd->ndlp = pnode; 5795 5796 status = phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id, 5797 task_mgmt_cmd); 5798 if (!status) { 5799 lpfc_release_scsi_buf(phba, lpfc_cmd); 5800 return FAILED; 5801 } 5802 5803 iocbq = &lpfc_cmd->cur_iocbq; 5804 iocbqrsp = lpfc_sli_get_iocbq(phba); 5805 if (iocbqrsp == NULL) { 5806 lpfc_release_scsi_buf(phba, lpfc_cmd); 5807 return FAILED; 5808 } 5809 iocbq->cmd_cmpl = lpfc_tskmgmt_def_cmpl; 5810 iocbq->vport = vport; 5811 5812 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5813 "0702 Issue %s to TGT %d LUN %llu " 5814 "rpi x%x nlp_flag x%x Data: x%x x%x\n", 5815 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id, 5816 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag, 5817 iocbq->cmd_flag); 5818 5819 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, 5820 iocbq, iocbqrsp, lpfc_cmd->timeout); 5821 if ((status != IOCB_SUCCESS) || 5822 (get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_SUCCESS)) { 5823 if (status != IOCB_SUCCESS || 5824 get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_FCP_RSP_ERROR) 5825 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5826 "0727 TMF %s to TGT %d LUN %llu " 5827 "failed (%d, %d) cmd_flag x%x\n", 5828 lpfc_taskmgmt_name(task_mgmt_cmd), 5829 tgt_id, lun_id, 5830 get_job_ulpstatus(phba, iocbqrsp), 5831 get_job_word4(phba, iocbqrsp), 5832 iocbq->cmd_flag); 5833 /* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */ 5834 if (status == IOCB_SUCCESS) { 5835 if (get_job_ulpstatus(phba, iocbqrsp) == 5836 IOSTAT_FCP_RSP_ERROR) 5837 /* Something in the FCP_RSP was invalid. 5838 * Check conditions */ 5839 ret = lpfc_check_fcp_rsp(vport, lpfc_cmd); 5840 else 5841 ret = FAILED; 5842 } else if ((status == IOCB_TIMEDOUT) || 5843 (status == IOCB_ABORTED)) { 5844 ret = TIMEOUT_ERROR; 5845 } else { 5846 ret = FAILED; 5847 } 5848 } else 5849 ret = SUCCESS; 5850 5851 lpfc_sli_release_iocbq(phba, iocbqrsp); 5852 5853 if (status != IOCB_TIMEDOUT) 5854 lpfc_release_scsi_buf(phba, lpfc_cmd); 5855 5856 return ret; 5857 } 5858 5859 /** 5860 * lpfc_chk_tgt_mapped - 5861 * @vport: The virtual port to check on 5862 * @rport: Pointer to fc_rport data structure. 5863 * 5864 * This routine delays until the scsi target (aka rport) for the 5865 * command exists (is present and logged in) or we declare it non-existent. 
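 * The routine polls rport->dd_data/pnode every 500 ms, for at most 2 * devloss_tmo
 * seconds, waiting for the node to reach NLP_STE_MAPPED_NODE.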
5866 *
5867 * Return code :
5868 * 0x2003 - Error
5869 * 0x2002 - Success
5870 **/
5871 static int
5872 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct fc_rport *rport)
5873 {
5874 struct lpfc_rport_data *rdata;
5875 struct lpfc_nodelist *pnode = NULL;
5876 unsigned long later;
5877
5878 rdata = rport->dd_data;
5879 if (!rdata) {
5880 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5881 "0797 Tgt Map rport failure: rdata x%px\n", rdata);
5882 return FAILED;
5883 }
5884 pnode = rdata->pnode;
5885
5886 /*
5887 * If target is not in a MAPPED state, delay until
5888 * target is rediscovered or devloss timeout expires.
5889 */
5890 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5891 while (time_after(later, jiffies)) {
5892 if (!pnode)
5893 return FAILED;
5894 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
5895 return SUCCESS;
5896 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
5897 rdata = rport->dd_data;
5898 if (!rdata)
5899 return FAILED;
5900 pnode = rdata->pnode;
5901 }
5902 if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
5903 return FAILED;
5904 return SUCCESS;
5905 }
5906
5907 /**
5908 * lpfc_reset_flush_io_context - Abort and flush outstanding I/O for a reset context
5909 * @vport: The virtual port (scsi_host) for the flush context
5910 * @tgt_id: If aborting by Target context - specifies the target id
5911 * @lun_id: If aborting by Lun context - specifies the lun id
5912 * @context: specifies the context level to flush at.
5913 *
5914 * After a reset condition via TMF, we need to flush orphaned i/o
5915 * contexts from the adapter. This routine aborts any contexts
5916 * outstanding, then waits for their completions. The wait is
5917 * bounded by devloss_tmo.
5918 *
5919 * Return code :
5920 * 0x2003 - Error
5921 * 0x2002 - Success
5922 **/
5923 static int
5924 lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
5925 uint64_t lun_id, lpfc_ctx_cmd context)
5926 {
5927 struct lpfc_hba *phba = vport->phba;
5928 unsigned long later;
5929 int cnt;
5930
5931 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5932 if (cnt)
5933 lpfc_sli_abort_taskmgmt(vport,
5934 &phba->sli.sli3_ring[LPFC_FCP_RING],
5935 tgt_id, lun_id, context);
5936 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5937 while (time_after(later, jiffies) && cnt) {
5938 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
5939 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5940 }
5941 if (cnt) {
5942 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5943 "0724 I/O flush failure for context %s : cnt x%x\n",
5944 ((context == LPFC_CTX_LUN) ? "LUN" :
5945 ((context == LPFC_CTX_TGT) ? "TGT" :
5946 ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
5947 cnt);
5948 return FAILED;
5949 }
5950 return SUCCESS;
5951 }
5952
5953 /**
5954 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
5955 * @cmnd: Pointer to scsi_cmnd data structure.
5956 *
5957 * This routine does a device reset by sending a LUN_RESET task management
5958 * command.
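 * On success the routine also flushes any I/O still outstanding on the LUN via
 * lpfc_reset_flush_io_context(LPFC_CTX_LUN) and reports the result of that flush.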
5959 * 5960 * Return code : 5961 * 0x2003 - Error 5962 * 0x2002 - Success 5963 **/ 5964 static int 5965 lpfc_device_reset_handler(struct scsi_cmnd *cmnd) 5966 { 5967 struct Scsi_Host *shost = cmnd->device->host; 5968 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 5969 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 5970 struct lpfc_rport_data *rdata; 5971 struct lpfc_nodelist *pnode; 5972 unsigned tgt_id = cmnd->device->id; 5973 uint64_t lun_id = cmnd->device->lun; 5974 struct lpfc_scsi_event_header scsi_event; 5975 int status; 5976 u32 logit = LOG_FCP; 5977 5978 if (!rport) 5979 return FAILED; 5980 5981 rdata = rport->dd_data; 5982 if (!rdata || !rdata->pnode) { 5983 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5984 "0798 Device Reset rdata failure: rdata x%px\n", 5985 rdata); 5986 return FAILED; 5987 } 5988 pnode = rdata->pnode; 5989 status = fc_block_rport(rport); 5990 if (status != 0 && status != SUCCESS) 5991 return status; 5992 5993 status = lpfc_chk_tgt_mapped(vport, rport); 5994 if (status == FAILED) { 5995 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5996 "0721 Device Reset rport failure: rdata x%px\n", rdata); 5997 return FAILED; 5998 } 5999 6000 scsi_event.event_type = FC_REG_SCSI_EVENT; 6001 scsi_event.subcategory = LPFC_EVENT_LUNRESET; 6002 scsi_event.lun = lun_id; 6003 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); 6004 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); 6005 6006 fc_host_post_vendor_event(shost, fc_get_event_number(), 6007 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 6008 6009 status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id, 6010 FCP_LUN_RESET); 6011 if (status != SUCCESS) 6012 logit = LOG_TRACE_EVENT; 6013 6014 lpfc_printf_vlog(vport, KERN_ERR, logit, 6015 "0713 SCSI layer issued Device Reset (%d, %llu) " 6016 "return x%x\n", tgt_id, lun_id, status); 6017 6018 /* 6019 * We have to clean up i/o as : they may be orphaned by the TMF; 6020 * or if the TMF failed, they may be in an indeterminate state. 6021 * So, continue on. 6022 * We will report success if all the i/o aborts successfully. 6023 */ 6024 if (status == SUCCESS) 6025 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, 6026 LPFC_CTX_LUN); 6027 6028 return status; 6029 } 6030 6031 /** 6032 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point 6033 * @cmnd: Pointer to scsi_cmnd data structure. 6034 * 6035 * This routine does a target reset by sending a TARGET_RESET task management 6036 * command. 
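 * If the TMF fails, the routine attempts to recover the rport by issuing a LOGO
 * (when none is already outstanding) and waits up to devloss_tmo for the LOGO
 * to complete before reporting status.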
6037 * 6038 * Return code : 6039 * 0x2003 - Error 6040 * 0x2002 - Success 6041 **/ 6042 static int 6043 lpfc_target_reset_handler(struct scsi_cmnd *cmnd) 6044 { 6045 struct Scsi_Host *shost = cmnd->device->host; 6046 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 6047 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6048 struct lpfc_rport_data *rdata; 6049 struct lpfc_nodelist *pnode; 6050 unsigned tgt_id = cmnd->device->id; 6051 uint64_t lun_id = cmnd->device->lun; 6052 struct lpfc_scsi_event_header scsi_event; 6053 int status; 6054 u32 logit = LOG_FCP; 6055 u32 dev_loss_tmo = vport->cfg_devloss_tmo; 6056 unsigned long flags; 6057 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); 6058 6059 if (!rport) 6060 return FAILED; 6061 6062 rdata = rport->dd_data; 6063 if (!rdata || !rdata->pnode) { 6064 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6065 "0799 Target Reset rdata failure: rdata x%px\n", 6066 rdata); 6067 return FAILED; 6068 } 6069 pnode = rdata->pnode; 6070 status = fc_block_rport(rport); 6071 if (status != 0 && status != SUCCESS) 6072 return status; 6073 6074 status = lpfc_chk_tgt_mapped(vport, rport); 6075 if (status == FAILED) { 6076 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6077 "0722 Target Reset rport failure: rdata x%px\n", rdata); 6078 if (pnode) { 6079 spin_lock_irqsave(&pnode->lock, flags); 6080 pnode->nlp_flag &= ~NLP_NPR_ADISC; 6081 pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 6082 spin_unlock_irqrestore(&pnode->lock, flags); 6083 } 6084 lpfc_reset_flush_io_context(vport, tgt_id, lun_id, 6085 LPFC_CTX_TGT); 6086 return FAST_IO_FAIL; 6087 } 6088 6089 scsi_event.event_type = FC_REG_SCSI_EVENT; 6090 scsi_event.subcategory = LPFC_EVENT_TGTRESET; 6091 scsi_event.lun = 0; 6092 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); 6093 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); 6094 6095 fc_host_post_vendor_event(shost, fc_get_event_number(), 6096 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 6097 6098 status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id, 6099 FCP_TARGET_RESET); 6100 if (status != SUCCESS) { 6101 logit = LOG_TRACE_EVENT; 6102 6103 /* Issue LOGO, if no LOGO is outstanding */ 6104 spin_lock_irqsave(&pnode->lock, flags); 6105 if (!(pnode->save_flags & NLP_WAIT_FOR_LOGO) && 6106 !pnode->logo_waitq) { 6107 pnode->logo_waitq = &waitq; 6108 pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 6109 pnode->nlp_flag |= NLP_ISSUE_LOGO; 6110 pnode->save_flags |= NLP_WAIT_FOR_LOGO; 6111 spin_unlock_irqrestore(&pnode->lock, flags); 6112 lpfc_unreg_rpi(vport, pnode); 6113 wait_event_timeout(waitq, 6114 (!(pnode->save_flags & 6115 NLP_WAIT_FOR_LOGO)), 6116 msecs_to_jiffies(dev_loss_tmo * 6117 1000)); 6118 6119 if (pnode->save_flags & NLP_WAIT_FOR_LOGO) { 6120 lpfc_printf_vlog(vport, KERN_ERR, logit, 6121 "0725 SCSI layer TGTRST " 6122 "failed & LOGO TMO (%d, %llu) " 6123 "return x%x\n", 6124 tgt_id, lun_id, status); 6125 spin_lock_irqsave(&pnode->lock, flags); 6126 pnode->save_flags &= ~NLP_WAIT_FOR_LOGO; 6127 } else { 6128 spin_lock_irqsave(&pnode->lock, flags); 6129 } 6130 pnode->logo_waitq = NULL; 6131 spin_unlock_irqrestore(&pnode->lock, flags); 6132 status = SUCCESS; 6133 6134 } else { 6135 spin_unlock_irqrestore(&pnode->lock, flags); 6136 status = FAILED; 6137 } 6138 } 6139 6140 lpfc_printf_vlog(vport, KERN_ERR, logit, 6141 "0723 SCSI layer issued Target Reset (%d, %llu) " 6142 "return x%x\n", tgt_id, lun_id, status); 6143 6144 /* 6145 * We have to clean up i/o as : they may 
be orphaned by the TMF; 6146 * or if the TMF failed, they may be in an indeterminate state. 6147 * So, continue on. 6148 * We will report success if all the i/o aborts successfully. 6149 */ 6150 if (status == SUCCESS) 6151 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, 6152 LPFC_CTX_TGT); 6153 return status; 6154 } 6155 6156 /** 6157 * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt 6158 * @cmnd: Pointer to scsi_cmnd data structure. 6159 * 6160 * This routine does host reset to the adaptor port. It brings the HBA 6161 * offline, performs a board restart, and then brings the board back online. 6162 * The lpfc_offline calls lpfc_sli_hba_down which will abort and local 6163 * reject all outstanding SCSI commands to the host and error returned 6164 * back to SCSI mid-level. As this will be SCSI mid-level's last resort 6165 * of error handling, it will only return error if resetting of the adapter 6166 * is not successful; in all other cases, will return success. 6167 * 6168 * Return code : 6169 * 0x2003 - Error 6170 * 0x2002 - Success 6171 **/ 6172 static int 6173 lpfc_host_reset_handler(struct scsi_cmnd *cmnd) 6174 { 6175 struct Scsi_Host *shost = cmnd->device->host; 6176 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6177 struct lpfc_hba *phba = vport->phba; 6178 int rc, ret = SUCCESS; 6179 6180 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 6181 "3172 SCSI layer issued Host Reset Data:\n"); 6182 6183 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 6184 lpfc_offline(phba); 6185 rc = lpfc_sli_brdrestart(phba); 6186 if (rc) 6187 goto error; 6188 6189 /* Wait for successful restart of adapter */ 6190 if (phba->sli_rev < LPFC_SLI_REV4) { 6191 rc = lpfc_sli_chipset_init(phba); 6192 if (rc) 6193 goto error; 6194 } 6195 6196 rc = lpfc_online(phba); 6197 if (rc) 6198 goto error; 6199 6200 lpfc_unblock_mgmt_io(phba); 6201 6202 return ret; 6203 error: 6204 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6205 "3323 Failed host reset\n"); 6206 lpfc_unblock_mgmt_io(phba); 6207 return FAILED; 6208 } 6209 6210 /** 6211 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point 6212 * @sdev: Pointer to scsi_device. 6213 * 6214 * This routine populates the cmds_per_lun count + 2 scsi_bufs into this host's 6215 * globally available list of scsi buffers. This routine also makes sure scsi 6216 * buffer is not allocated more than HBA limit conveyed to midlayer. This list 6217 * of scsi buffer exists for the lifetime of the driver. 6218 * 6219 * Return codes: 6220 * non-0 - Error 6221 * 0 - Success 6222 **/ 6223 static int 6224 lpfc_slave_alloc(struct scsi_device *sdev) 6225 { 6226 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 6227 struct lpfc_hba *phba = vport->phba; 6228 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 6229 uint32_t total = 0; 6230 uint32_t num_to_alloc = 0; 6231 int num_allocated = 0; 6232 uint32_t sdev_cnt; 6233 struct lpfc_device_data *device_data; 6234 unsigned long flags; 6235 struct lpfc_name target_wwpn; 6236 6237 if (!rport || fc_remote_port_chkready(rport)) 6238 return -ENXIO; 6239 6240 if (phba->cfg_fof) { 6241 6242 /* 6243 * Check to see if the device data structure for the lun 6244 * exists. If not, create one. 
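 * The lookup and list insertion are protected by phba->devicelock; the lock is
 * dropped while a new entry is allocated and re-taken before adding it to phba->luns.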
6245 */ 6246 6247 u64_to_wwn(rport->port_name, target_wwpn.u.wwn); 6248 spin_lock_irqsave(&phba->devicelock, flags); 6249 device_data = __lpfc_get_device_data(phba, 6250 &phba->luns, 6251 &vport->fc_portname, 6252 &target_wwpn, 6253 sdev->lun); 6254 if (!device_data) { 6255 spin_unlock_irqrestore(&phba->devicelock, flags); 6256 device_data = lpfc_create_device_data(phba, 6257 &vport->fc_portname, 6258 &target_wwpn, 6259 sdev->lun, 6260 phba->cfg_XLanePriority, 6261 true); 6262 if (!device_data) 6263 return -ENOMEM; 6264 spin_lock_irqsave(&phba->devicelock, flags); 6265 list_add_tail(&device_data->listentry, &phba->luns); 6266 } 6267 device_data->rport_data = rport->dd_data; 6268 device_data->available = true; 6269 spin_unlock_irqrestore(&phba->devicelock, flags); 6270 sdev->hostdata = device_data; 6271 } else { 6272 sdev->hostdata = rport->dd_data; 6273 } 6274 sdev_cnt = atomic_inc_return(&phba->sdev_cnt); 6275 6276 /* For SLI4, all IO buffers are pre-allocated */ 6277 if (phba->sli_rev == LPFC_SLI_REV4) 6278 return 0; 6279 6280 /* This code path is now ONLY for SLI3 adapters */ 6281 6282 /* 6283 * Populate the cmds_per_lun count scsi_bufs into this host's globally 6284 * available list of scsi buffers. Don't allocate more than the 6285 * HBA limit conveyed to the midlayer via the host structure. The 6286 * formula accounts for the lun_queue_depth + error handlers + 1 6287 * extra. This list of scsi bufs exists for the lifetime of the driver. 6288 */ 6289 total = phba->total_scsi_bufs; 6290 num_to_alloc = vport->cfg_lun_queue_depth + 2; 6291 6292 /* If allocated buffers are enough do nothing */ 6293 if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total) 6294 return 0; 6295 6296 /* Allow some exchanges to be available always to complete discovery */ 6297 if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) { 6298 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 6299 "0704 At limitation of %d preallocated " 6300 "command buffers\n", total); 6301 return 0; 6302 /* Allow some exchanges to be available always to complete discovery */ 6303 } else if (total + num_to_alloc > 6304 phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) { 6305 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 6306 "0705 Allocation request of %d " 6307 "command buffers will exceed max of %d. " 6308 "Reducing allocation request to %d.\n", 6309 num_to_alloc, phba->cfg_hba_queue_depth, 6310 (phba->cfg_hba_queue_depth - total)); 6311 num_to_alloc = phba->cfg_hba_queue_depth - total; 6312 } 6313 num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc); 6314 if (num_to_alloc != num_allocated) { 6315 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6316 "0708 Allocation request of %d " 6317 "command buffers did not succeed. " 6318 "Allocated %d buffers.\n", 6319 num_to_alloc, num_allocated); 6320 } 6321 if (num_allocated > 0) 6322 phba->total_scsi_bufs += num_allocated; 6323 return 0; 6324 } 6325 6326 /** 6327 * lpfc_slave_configure - scsi_host_template slave_configure entry point 6328 * @sdev: Pointer to scsi_device. 6329 * 6330 * This routine configures following items 6331 * - Tag command queuing support for @sdev if supported. 6332 * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set. 
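 * The initial queue depth for @sdev is set to vport->cfg_lun_queue_depth.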
6333 * 6334 * Return codes: 6335 * 0 - Success 6336 **/ 6337 static int 6338 lpfc_slave_configure(struct scsi_device *sdev) 6339 { 6340 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 6341 struct lpfc_hba *phba = vport->phba; 6342 6343 scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth); 6344 6345 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 6346 lpfc_sli_handle_fast_ring_event(phba, 6347 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); 6348 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 6349 lpfc_poll_rearm_timer(phba); 6350 } 6351 6352 return 0; 6353 } 6354 6355 /** 6356 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure 6357 * @sdev: Pointer to scsi_device. 6358 * 6359 * This routine sets @sdev hostatdata filed to null. 6360 **/ 6361 static void 6362 lpfc_slave_destroy(struct scsi_device *sdev) 6363 { 6364 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 6365 struct lpfc_hba *phba = vport->phba; 6366 unsigned long flags; 6367 struct lpfc_device_data *device_data = sdev->hostdata; 6368 6369 atomic_dec(&phba->sdev_cnt); 6370 if ((phba->cfg_fof) && (device_data)) { 6371 spin_lock_irqsave(&phba->devicelock, flags); 6372 device_data->available = false; 6373 if (!device_data->oas_enabled) 6374 lpfc_delete_device_data(phba, device_data); 6375 spin_unlock_irqrestore(&phba->devicelock, flags); 6376 } 6377 sdev->hostdata = NULL; 6378 return; 6379 } 6380 6381 /** 6382 * lpfc_create_device_data - creates and initializes device data structure for OAS 6383 * @phba: Pointer to host bus adapter structure. 6384 * @vport_wwpn: Pointer to vport's wwpn information 6385 * @target_wwpn: Pointer to target's wwpn information 6386 * @lun: Lun on target 6387 * @pri: Priority 6388 * @atomic_create: Flag to indicate if memory should be allocated using the 6389 * GFP_ATOMIC flag or not. 6390 * 6391 * This routine creates a device data structure which will contain identifying 6392 * information for the device (host wwpn, target wwpn, lun), state of OAS, 6393 * whether or not the corresponding lun is available by the system, 6394 * and pointer to the rport data. 6395 * 6396 * Return codes: 6397 * NULL - Error 6398 * Pointer to lpfc_device_data - Success 6399 **/ 6400 struct lpfc_device_data* 6401 lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, 6402 struct lpfc_name *target_wwpn, uint64_t lun, 6403 uint32_t pri, bool atomic_create) 6404 { 6405 6406 struct lpfc_device_data *lun_info; 6407 int memory_flags; 6408 6409 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 6410 !(phba->cfg_fof)) 6411 return NULL; 6412 6413 /* Attempt to create the device data to contain lun info */ 6414 6415 if (atomic_create) 6416 memory_flags = GFP_ATOMIC; 6417 else 6418 memory_flags = GFP_KERNEL; 6419 lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags); 6420 if (!lun_info) 6421 return NULL; 6422 INIT_LIST_HEAD(&lun_info->listentry); 6423 lun_info->rport_data = NULL; 6424 memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn, 6425 sizeof(struct lpfc_name)); 6426 memcpy(&lun_info->device_id.target_wwpn, target_wwpn, 6427 sizeof(struct lpfc_name)); 6428 lun_info->device_id.lun = lun; 6429 lun_info->oas_enabled = false; 6430 lun_info->priority = pri; 6431 lun_info->available = false; 6432 return lun_info; 6433 } 6434 6435 /** 6436 * lpfc_delete_device_data - frees a device data structure for OAS 6437 * @phba: Pointer to host bus adapter structure. 6438 * @lun_info: Pointer to device data structure to free. 
6439 * 6440 * This routine frees the previously allocated device data structure passed. 6441 * 6442 **/ 6443 void 6444 lpfc_delete_device_data(struct lpfc_hba *phba, 6445 struct lpfc_device_data *lun_info) 6446 { 6447 6448 if (unlikely(!phba) || !lun_info || 6449 !(phba->cfg_fof)) 6450 return; 6451 6452 if (!list_empty(&lun_info->listentry)) 6453 list_del(&lun_info->listentry); 6454 mempool_free(lun_info, phba->device_data_mem_pool); 6455 return; 6456 } 6457 6458 /** 6459 * __lpfc_get_device_data - returns the device data for the specified lun 6460 * @phba: Pointer to host bus adapter structure. 6461 * @list: Point to list to search. 6462 * @vport_wwpn: Pointer to vport's wwpn information 6463 * @target_wwpn: Pointer to target's wwpn information 6464 * @lun: Lun on target 6465 * 6466 * This routine searches the list passed for the specified lun's device data. 6467 * This function does not hold locks, it is the responsibility of the caller 6468 * to ensure the proper lock is held before calling the function. 6469 * 6470 * Return codes: 6471 * NULL - Error 6472 * Pointer to lpfc_device_data - Success 6473 **/ 6474 struct lpfc_device_data* 6475 __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list, 6476 struct lpfc_name *vport_wwpn, 6477 struct lpfc_name *target_wwpn, uint64_t lun) 6478 { 6479 6480 struct lpfc_device_data *lun_info; 6481 6482 if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn || 6483 !phba->cfg_fof) 6484 return NULL; 6485 6486 /* Check to see if the lun is already enabled for OAS. */ 6487 6488 list_for_each_entry(lun_info, list, listentry) { 6489 if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn, 6490 sizeof(struct lpfc_name)) == 0) && 6491 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn, 6492 sizeof(struct lpfc_name)) == 0) && 6493 (lun_info->device_id.lun == lun)) 6494 return lun_info; 6495 } 6496 6497 return NULL; 6498 } 6499 6500 /** 6501 * lpfc_find_next_oas_lun - searches for the next oas lun 6502 * @phba: Pointer to host bus adapter structure. 6503 * @vport_wwpn: Pointer to vport's wwpn information 6504 * @target_wwpn: Pointer to target's wwpn information 6505 * @starting_lun: Pointer to the lun to start searching for 6506 * @found_vport_wwpn: Pointer to the found lun's vport wwpn information 6507 * @found_target_wwpn: Pointer to the found lun's target wwpn information 6508 * @found_lun: Pointer to the found lun. 6509 * @found_lun_status: Pointer to status of the found lun. 6510 * @found_lun_pri: Pointer to priority of the found lun. 6511 * 6512 * This routine searches the luns list for the specified lun 6513 * or the first lun for the vport/target. If the vport wwpn contains 6514 * a zero value then a specific vport is not specified. In this case 6515 * any vport which contains the lun will be considered a match. If the 6516 * target wwpn contains a zero value then a specific target is not specified. 6517 * In this case any target which contains the lun will be considered a 6518 * match. If the lun is found, the lun, vport wwpn, target wwpn and lun status 6519 * are returned. The function will also return the next lun if available. 6520 * If the next lun is not found, starting_lun parameter will be set to 6521 * NO_MORE_OAS_LUN. 
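 * The list walk is performed under phba->devicelock. Note that, despite the
 * return-code text below, the function returns true when a matching lun is
 * found and false otherwise.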
6522 * 6523 * Return codes: 6524 * non-0 - Error 6525 * 0 - Success 6526 **/ 6527 bool 6528 lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, 6529 struct lpfc_name *target_wwpn, uint64_t *starting_lun, 6530 struct lpfc_name *found_vport_wwpn, 6531 struct lpfc_name *found_target_wwpn, 6532 uint64_t *found_lun, 6533 uint32_t *found_lun_status, 6534 uint32_t *found_lun_pri) 6535 { 6536 6537 unsigned long flags; 6538 struct lpfc_device_data *lun_info; 6539 struct lpfc_device_id *device_id; 6540 uint64_t lun; 6541 bool found = false; 6542 6543 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 6544 !starting_lun || !found_vport_wwpn || 6545 !found_target_wwpn || !found_lun || !found_lun_status || 6546 (*starting_lun == NO_MORE_OAS_LUN) || 6547 !phba->cfg_fof) 6548 return false; 6549 6550 lun = *starting_lun; 6551 *found_lun = NO_MORE_OAS_LUN; 6552 *starting_lun = NO_MORE_OAS_LUN; 6553 6554 /* Search for lun or the lun closet in value */ 6555 6556 spin_lock_irqsave(&phba->devicelock, flags); 6557 list_for_each_entry(lun_info, &phba->luns, listentry) { 6558 if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) || 6559 (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn, 6560 sizeof(struct lpfc_name)) == 0)) && 6561 ((wwn_to_u64(target_wwpn->u.wwn) == 0) || 6562 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn, 6563 sizeof(struct lpfc_name)) == 0)) && 6564 (lun_info->oas_enabled)) { 6565 device_id = &lun_info->device_id; 6566 if ((!found) && 6567 ((lun == FIND_FIRST_OAS_LUN) || 6568 (device_id->lun == lun))) { 6569 *found_lun = device_id->lun; 6570 memcpy(found_vport_wwpn, 6571 &device_id->vport_wwpn, 6572 sizeof(struct lpfc_name)); 6573 memcpy(found_target_wwpn, 6574 &device_id->target_wwpn, 6575 sizeof(struct lpfc_name)); 6576 if (lun_info->available) 6577 *found_lun_status = 6578 OAS_LUN_STATUS_EXISTS; 6579 else 6580 *found_lun_status = 0; 6581 *found_lun_pri = lun_info->priority; 6582 if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT) 6583 memset(vport_wwpn, 0x0, 6584 sizeof(struct lpfc_name)); 6585 if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET) 6586 memset(target_wwpn, 0x0, 6587 sizeof(struct lpfc_name)); 6588 found = true; 6589 } else if (found) { 6590 *starting_lun = device_id->lun; 6591 memcpy(vport_wwpn, &device_id->vport_wwpn, 6592 sizeof(struct lpfc_name)); 6593 memcpy(target_wwpn, &device_id->target_wwpn, 6594 sizeof(struct lpfc_name)); 6595 break; 6596 } 6597 } 6598 } 6599 spin_unlock_irqrestore(&phba->devicelock, flags); 6600 return found; 6601 } 6602 6603 /** 6604 * lpfc_enable_oas_lun - enables a lun for OAS operations 6605 * @phba: Pointer to host bus adapter structure. 6606 * @vport_wwpn: Pointer to vport's wwpn information 6607 * @target_wwpn: Pointer to target's wwpn information 6608 * @lun: Lun 6609 * @pri: Priority 6610 * 6611 * This routine enables a lun for oas operations. The routines does so by 6612 * doing the following : 6613 * 6614 * 1) Checks to see if the device data for the lun has been created. 6615 * 2) If found, sets the OAS enabled flag if not set and returns. 6616 * 3) Otherwise, creates a device data structure. 6617 * 4) If successfully created, indicates the device data is for an OAS lun, 6618 * indicates the lun is not available and add to the list of luns. 
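 * All updates are made while holding phba->devicelock; when a new entry must be
 * created it is allocated atomically (atomic_create == true) since the lock is held.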
6619 * 6620 * Return codes: 6621 * false - Error 6622 * true - Success 6623 **/ 6624 bool 6625 lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, 6626 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri) 6627 { 6628 6629 struct lpfc_device_data *lun_info; 6630 unsigned long flags; 6631 6632 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 6633 !phba->cfg_fof) 6634 return false; 6635 6636 spin_lock_irqsave(&phba->devicelock, flags); 6637 6638 /* Check to see if the device data for the lun has been created */ 6639 lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn, 6640 target_wwpn, lun); 6641 if (lun_info) { 6642 if (!lun_info->oas_enabled) 6643 lun_info->oas_enabled = true; 6644 lun_info->priority = pri; 6645 spin_unlock_irqrestore(&phba->devicelock, flags); 6646 return true; 6647 } 6648 6649 /* Create an lun info structure and add to list of luns */ 6650 lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun, 6651 pri, true); 6652 if (lun_info) { 6653 lun_info->oas_enabled = true; 6654 lun_info->priority = pri; 6655 lun_info->available = false; 6656 list_add_tail(&lun_info->listentry, &phba->luns); 6657 spin_unlock_irqrestore(&phba->devicelock, flags); 6658 return true; 6659 } 6660 spin_unlock_irqrestore(&phba->devicelock, flags); 6661 return false; 6662 } 6663 6664 /** 6665 * lpfc_disable_oas_lun - disables a lun for OAS operations 6666 * @phba: Pointer to host bus adapter structure. 6667 * @vport_wwpn: Pointer to vport's wwpn information 6668 * @target_wwpn: Pointer to target's wwpn information 6669 * @lun: Lun 6670 * @pri: Priority 6671 * 6672 * This routine disables a lun for oas operations. The routines does so by 6673 * doing the following : 6674 * 6675 * 1) Checks to see if the device data for the lun is created. 6676 * 2) If present, clears the flag indicating this lun is for OAS. 6677 * 3) If the lun is not available by the system, the device data is 6678 * freed. 6679 * 6680 * Return codes: 6681 * false - Error 6682 * true - Success 6683 **/ 6684 bool 6685 lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, 6686 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri) 6687 { 6688 6689 struct lpfc_device_data *lun_info; 6690 unsigned long flags; 6691 6692 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 6693 !phba->cfg_fof) 6694 return false; 6695 6696 spin_lock_irqsave(&phba->devicelock, flags); 6697 6698 /* Check to see if the lun is available. 
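 * If the entry exists but is not marked available to the SCSI layer, it is
 * freed outright once OAS is cleared.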
*/ 6699 lun_info = __lpfc_get_device_data(phba, 6700 &phba->luns, vport_wwpn, 6701 target_wwpn, lun); 6702 if (lun_info) { 6703 lun_info->oas_enabled = false; 6704 lun_info->priority = pri; 6705 if (!lun_info->available) 6706 lpfc_delete_device_data(phba, lun_info); 6707 spin_unlock_irqrestore(&phba->devicelock, flags); 6708 return true; 6709 } 6710 6711 spin_unlock_irqrestore(&phba->devicelock, flags); 6712 return false; 6713 } 6714 6715 static int 6716 lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) 6717 { 6718 return SCSI_MLQUEUE_HOST_BUSY; 6719 } 6720 6721 static int 6722 lpfc_no_slave(struct scsi_device *sdev) 6723 { 6724 return -ENODEV; 6725 } 6726 6727 struct scsi_host_template lpfc_template_nvme = { 6728 .module = THIS_MODULE, 6729 .name = LPFC_DRIVER_NAME, 6730 .proc_name = LPFC_DRIVER_NAME, 6731 .info = lpfc_info, 6732 .queuecommand = lpfc_no_command, 6733 .slave_alloc = lpfc_no_slave, 6734 .slave_configure = lpfc_no_slave, 6735 .scan_finished = lpfc_scan_finished, 6736 .this_id = -1, 6737 .sg_tablesize = 1, 6738 .cmd_per_lun = 1, 6739 .shost_groups = lpfc_hba_groups, 6740 .max_sectors = 0xFFFFFFFF, 6741 .vendor_id = LPFC_NL_VENDOR_ID, 6742 .track_queue_depth = 0, 6743 }; 6744 6745 struct scsi_host_template lpfc_template = { 6746 .module = THIS_MODULE, 6747 .name = LPFC_DRIVER_NAME, 6748 .proc_name = LPFC_DRIVER_NAME, 6749 .info = lpfc_info, 6750 .queuecommand = lpfc_queuecommand, 6751 .eh_timed_out = fc_eh_timed_out, 6752 .eh_should_retry_cmd = fc_eh_should_retry_cmd, 6753 .eh_abort_handler = lpfc_abort_handler, 6754 .eh_device_reset_handler = lpfc_device_reset_handler, 6755 .eh_target_reset_handler = lpfc_target_reset_handler, 6756 .eh_host_reset_handler = lpfc_host_reset_handler, 6757 .slave_alloc = lpfc_slave_alloc, 6758 .slave_configure = lpfc_slave_configure, 6759 .slave_destroy = lpfc_slave_destroy, 6760 .scan_finished = lpfc_scan_finished, 6761 .this_id = -1, 6762 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, 6763 .cmd_per_lun = LPFC_CMD_PER_LUN, 6764 .shost_groups = lpfc_hba_groups, 6765 .max_sectors = 0xFFFFFFFF, 6766 .vendor_id = LPFC_NL_VENDOR_ID, 6767 .change_queue_depth = scsi_change_queue_depth, 6768 .track_queue_depth = 1, 6769 }; 6770 6771 struct scsi_host_template lpfc_vport_template = { 6772 .module = THIS_MODULE, 6773 .name = LPFC_DRIVER_NAME, 6774 .proc_name = LPFC_DRIVER_NAME, 6775 .info = lpfc_info, 6776 .queuecommand = lpfc_queuecommand, 6777 .eh_timed_out = fc_eh_timed_out, 6778 .eh_should_retry_cmd = fc_eh_should_retry_cmd, 6779 .eh_abort_handler = lpfc_abort_handler, 6780 .eh_device_reset_handler = lpfc_device_reset_handler, 6781 .eh_target_reset_handler = lpfc_target_reset_handler, 6782 .eh_bus_reset_handler = NULL, 6783 .eh_host_reset_handler = NULL, 6784 .slave_alloc = lpfc_slave_alloc, 6785 .slave_configure = lpfc_slave_configure, 6786 .slave_destroy = lpfc_slave_destroy, 6787 .scan_finished = lpfc_scan_finished, 6788 .this_id = -1, 6789 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, 6790 .cmd_per_lun = LPFC_CMD_PER_LUN, 6791 .shost_groups = lpfc_vport_groups, 6792 .max_sectors = 0xFFFFFFFF, 6793 .vendor_id = 0, 6794 .change_queue_depth = scsi_change_queue_depth, 6795 .track_queue_depth = 1, 6796 }; 6797