/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/t10-pi.h>
#include <linux/crc-t10dif.h>
#include <linux/blk-cgroup.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

static char *dif_op_str[] = {
	"PROT_NORMAL",
	"PROT_READ_INSERT",
	"PROT_WRITE_STRIP",
	"PROT_READ_STRIP",
	"PROT_WRITE_INSERT",
	"PROT_READ_PASS",
	"PROT_WRITE_PASS",
};

struct scsi_dif_tuple {
	__be16 guard_tag;	/* Checksum */
	__be16 app_tag;		/* Opaque storage */
	__be32 ref_tag;		/* Target LBA or indirect LBA */
};

static struct lpfc_rport_data *
lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;

	if (vport->phba->cfg_fof)
		return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
	else
		return (struct lpfc_rport_data *)sdev->hostdata;
}

static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);

/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called from the lpfc_prep_task_mgmt_cmd function to
 * set the last bit in the response sge.
 **/
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
			   struct lpfc_io_buf *lpfc_cmd)
{
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;

	if (sgl) {
		sgl += 1;
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}
}

#define LPFC_INVALID_REFTAG ((u32)-1)

/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @vport: The virtual port on which this call is executing.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called when there is a command completion and this
 * function updates the statistical data for the command completion.
 **/
static void
lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	unsigned long latency;
	int i;

	if (!vport->stat_data_enabled ||
	    vport->stat_data_blocked ||
	    (cmd->result))
		return;

	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
	rdata = lpfc_cmd->rdata;
	pnode = rdata->pnode;

	spin_lock_irqsave(shost->host_lock, flags);
	if (!pnode ||
	    !pnode->lat_data ||
	    (phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		i = (latency + phba->bucket_step - 1 - phba->bucket_base) /
			phba->bucket_step;
		/* check array subscript bounds */
		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT - 1; i++)
			if (latency <= (phba->bucket_base +
					((1 << i) * phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is a resource error in driver or firmware.
 * This routine posts a WORKER_RAMP_DOWN_QUEUE event for @phba. This routine
 * posts at most 1 event each second. This routine wakes up the worker thread
 * of @phba to process the WORKER_RAMP_DOWN_QUEUE event.
 *
 * This routine should be called with no lock held.
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;
	unsigned long expires;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
	if (time_after(expires, jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
 * worker thread. This routine reduces the queue depth for all scsi devices on
 * each vport associated with @phba.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	/*
	 * The error and success command counters are global per
	 * driver instance.  If another handler has already
	 * operated on this error event, just exit.
	 */
	if (num_rsrc_err == 0)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				/* Reduce the depth by the resource-error
				 * fraction: new = old - old * err / (err + ok)
				 */
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
						new_queue_depth;
				scsi_change_queue_depth(sdev, new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to block state
 * by invoking the fc_remote_port_delete() routine. This function is invoked
 * with EEH when the device's PCI slot has been permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates a scsi buffer for device with SLI-3 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. The non-DMAable buffer region contains information to build
 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
 * and the initial BPL. In addition to allocating memory, the FCP CMND and
 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_io_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_sgl;
	uint16_t iotag;
	int bcnt, bpl_size;

	bpl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp), bpl_size);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci
		 * bus space for an I/O. The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
					    GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.cmd_flag |= LPFC_IO_FCP;

		psb->fcp_cmnd = psb->data;
		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
		psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/* Initialize local short-hand pointers. */
		bpl = (struct ulp_bde64 *)psb->dma_sgl;
		pdma_phys_fcp_cmd = psb->dma_handle;
		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
		pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
		 * are sg list bdes.
		 * Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

		/* Setup the physical region for the FCP RSP */
		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		if ((phba->sli_rev == 3) &&
		    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
			/* fill in immediate fcp command BDE */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
							       unsli3.fcp_ext.icd);
			iocb->un.fcpi64.bdl.addrHigh = 0;
			iocb->ulpBdeCount = 0;
			iocb->ulpLe = 0;
			/* fill in response BDE */
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
				BUFF_TYPE_BDE_64;
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
				sizeof(struct fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrLow =
				putPaddrLow(pdma_phys_fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrHigh =
				putPaddrHigh(pdma_phys_fcp_rsp);
		} else {
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
			iocb->un.fcpi64.bdl.bdeSize =
				(2 * sizeof(struct ulp_bde64));
			iocb->un.fcpi64.bdl.addrLow =
				putPaddrLow(pdma_phys_sgl);
			iocb->un.fcpi64.bdl.addrHigh =
				putPaddrHigh(pdma_phys_sgl);
			iocb->ulpBdeCount = 1;
			iocb->ulpLe = 1;
		}
		iocb->ulpClass = CLASS3;
		psb->status = IOSTAT_SUCCESS;
		/* Put it back into the SCSI buffer list */
		psb->cur_iocbq.io_buf = psb;
		spin_lock_init(&psb->buf_lock);
		lpfc_release_scsi_buf_s3(phba, psb);

	}

	return bcnt;
}

/**
 * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup for deletions and the cleanup
 * for an ndlp on removal.
 **/
void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_io_buf *psb, *next_psb;
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;
	int idx;

	if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;

	spin_lock_irqsave(&phba->hbalock, iflag);
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_for_each_entry_safe(psb, next_psb,
					 &qp->lpfc_abts_io_buf_list, list) {
			if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME)
				continue;

			if (psb->rdata && psb->rdata->pnode &&
			    psb->rdata->pnode->vport == vport)
				psb->rdata = NULL;
		}
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 * @idx: index into hdwq
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * FCP or NVME aborted xri.
 **/
void
lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
			 struct sli4_wcqe_xri_aborted *axri, int idx)
{
	u16 xri = 0;
	u16 rxid = 0;
	struct lpfc_io_buf *psb, *next_psb;
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;
	struct lpfc_iocbq *iocbq;
	int i;
	struct lpfc_nodelist *ndlp;
	int rrq_empty = 0;
	struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;
	struct scsi_cmnd *cmd;
	int offline = 0;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;
	offline = pci_channel_offline(phba->pcidev);
	if (!offline) {
		xri = bf_get(lpfc_wcqe_xa_xri, axri);
		rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	}
	qp = &phba->sli4_hba.hdwq[idx];
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&qp->abts_io_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
				 &qp->lpfc_abts_io_buf_list, list) {
		if (offline)
			xri = psb->cur_iocbq.sli4_xritag;
		if (psb->cur_iocbq.sli4_xritag == xri) {
			list_del_init(&psb->list);
			psb->flags &= ~LPFC_SBUF_XBUSY;
			psb->status = IOSTAT_SUCCESS;
			if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME) {
				qp->abts_nvme_io_bufs--;
				spin_unlock(&qp->abts_io_buf_list_lock);
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				if (!offline) {
					lpfc_sli4_nvme_xri_aborted(phba, axri,
								   psb);
					return;
				}
				lpfc_sli4_nvme_pci_offline_aborted(phba, psb);
				spin_lock_irqsave(&phba->hbalock, iflag);
				spin_lock(&qp->abts_io_buf_list_lock);
				continue;
			}
			qp->abts_scsi_io_bufs--;
			spin_unlock(&qp->abts_io_buf_list_lock);

			if (psb->rdata && psb->rdata->pnode)
				ndlp = psb->rdata->pnode;
			else
				ndlp = NULL;

			rrq_empty = list_empty(&phba->active_rrq_list);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			if (ndlp && !offline) {
				lpfc_set_rrq_active(phba, ndlp,
					psb->cur_iocbq.sli4_lxritag, rxid, 1);
				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
			}

			if (phba->cfg_fcp_wait_abts_rsp || offline) {
				spin_lock_irqsave(&psb->buf_lock, iflag);
				cmd = psb->pCmd;
				psb->pCmd = NULL;
				spin_unlock_irqrestore(&psb->buf_lock, iflag);

				/* The sdev is not guaranteed to be valid post
				 * scsi_done upcall.
				 */
				if (cmd)
					scsi_done(cmd);

				/*
				 * We expect there is an abort thread waiting
				 * for command completion; wake up the thread.
				 */
				spin_lock_irqsave(&psb->buf_lock, iflag);
				psb->cur_iocbq.cmd_flag &=
					~LPFC_DRIVER_ABORTED;
				if (psb->waitq)
					wake_up(psb->waitq);
				spin_unlock_irqrestore(&psb->buf_lock, iflag);
			}

			lpfc_release_scsi_buf_s4(phba, psb);
			if (rrq_empty)
				lpfc_worker_wake_up(phba);
			if (!offline)
				return;
			spin_lock_irqsave(&phba->hbalock, iflag);
			spin_lock(&qp->abts_io_buf_list_lock);
			continue;
		}
	}
	spin_unlock(&qp->abts_io_buf_list_lock);
	if (!offline) {
		for (i = 1; i <= phba->sli.last_iotag; i++) {
			iocbq = phba->sli.iocbq_lookup[i];

			if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
			    (iocbq->cmd_flag & LPFC_IO_LIBDFC))
				continue;
			if (iocbq->sli4_xritag != xri)
				continue;
			psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
			psb->flags &= ~LPFC_SBUF_XBUSY;
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			if (!list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
 * and returns to caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_io_buf *
lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     struct scsi_cmnd *cmnd)
{
	struct lpfc_io_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
	list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf,
			 list);
	if (!lpfc_cmd) {
		spin_lock(&phba->scsi_buf_list_put_lock);
		list_splice(&phba->lpfc_scsi_buf_list_put,
			    &phba->lpfc_scsi_buf_list_get);
		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
		list_remove_head(scsi_buf_list_get, lpfc_cmd,
				 struct lpfc_io_buf, list);
		spin_unlock(&phba->scsi_buf_list_put_lock);
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);

	if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
		atomic_inc(&ndlp->cmd_pending);
		lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
	}
	return lpfc_cmd;
}

/**
 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine removes a scsi buffer from head of @hdwq io_buf_list
 * and returns to caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_io_buf *
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     struct scsi_cmnd *cmnd)
{
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_sli4_hdw_queue *qp;
	struct sli4_sge *sgl;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_fcp_cmd;
	uint32_t cpu, idx;
	int tag;
	struct fcp_cmd_rsp_buf *tmp = NULL;

	cpu = raw_smp_processor_id();
	if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
		tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
		idx = blk_mq_unique_tag_to_hwq(tag);
	} else {
		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
	}

	lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx,
				   !phba->cfg_xri_rebalancing);
	if (!lpfc_cmd) {
		qp = &phba->sli4_hba.hdwq[idx];
		qp->empty_io_bufs++;
		return NULL;
	}

	/* Setup key fields in buffer that may have been changed
	 * if other protocols used this buffer.
	 */
	lpfc_cmd->cur_iocbq.cmd_flag = LPFC_IO_FCP;
	lpfc_cmd->prot_seg_cnt = 0;
	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->timeout = 0;
	lpfc_cmd->flags = 0;
	lpfc_cmd->start_time = jiffies;
	lpfc_cmd->waitq = NULL;
	lpfc_cmd->cpu = cpu;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	lpfc_cmd->prot_data_type = 0;
#endif
	tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
	if (!tmp) {
		lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
		return NULL;
	}

	lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
	lpfc_cmd->fcp_rsp = tmp->fcp_rsp;

	/*
	 * The first two SGEs are the FCP_CMD and FCP_RSP.
	 * The balance are sg list bdes. Initialize the
	 * first two and leave the rest for queuecommand.
	 */
	sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
	pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
	sgl->word2 = le32_to_cpu(sgl->word2);
	bf_set(lpfc_sli4_sge_last, sgl, 0);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
	sgl++;

	/* Setup the physical region for the FCP RSP */
	pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
	sgl->word2 = le32_to_cpu(sgl->word2);
	bf_set(lpfc_sli4_sge_last, sgl, 1);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
		atomic_inc(&ndlp->cmd_pending);
		lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
	}
	return lpfc_cmd;
}

/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
 * and returns to caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_io_buf *
lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  struct scsi_cmnd *cmnd)
{
	return phba->lpfc_get_scsi_buf(phba, ndlp, cmnd);
}

/**
 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->prot_seg_cnt = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	psb->pCmd = NULL;
	psb->cur_iocbq.cmd_flag = LPFC_IO_FCP;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
}

/**
 * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @hdwq
 * io_buf_list list. For SLI4, XRIs are tied to the scsi buffer
 * and cannot be reused for at least RA_TOV amount of time if it was
 * aborted.
 **/
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->prot_seg_cnt = 0;

	qp = psb->hdwq;
	if (psb->flags & LPFC_SBUF_XBUSY) {
		spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
		if (!phba->cfg_fcp_wait_abts_rsp)
			psb->pCmd = NULL;
		list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
		qp->abts_scsi_io_bufs++;
		spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
	} else {
		lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
	}
}

/**
 * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
		atomic_dec(&psb->ndlp->cmd_pending);

	psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
	phba->lpfc_release_scsi_buf(phba, psb);
}

/**
 * lpfc_fcpcmd_to_iocb - copy the fcp_cmnd data into the IOCB
 * @data: A pointer to the immediate command data portion of the IOCB.
 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
 *
 * The routine copies the entire FCP command from @fcp_cmnd to @data while
 * byte swapping the data to big endian format for transmission on the wire.
 **/
static void
lpfc_fcpcmd_to_iocb(u8 *data, struct fcp_cmnd *fcp_cmnd)
{
	int i, j;

	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
	     i += sizeof(uint32_t), j++) {
		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
	}
}

/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
 * through sg elements and formats the bde. This routine also initializes all
 * IOCB fields which are dependent on scsi command request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from dma_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9064 BLKGRD: %s: Too many sg segments"
					" from dma_map_sg. Config %d, seg_cnt"
					" %d\n", __func__, phba->cfg_sg_seg_cnt,
					lpfc_cmd->seg_cnt);
			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 2;
		}

		/*
		 * The driver established a maximum scatter-gather segment
		 * count during probe that limits the number of sg elements
		 * in any single scsi command.  Just run through the seg_cnt
		 * and format the bde's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB.  If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    !(iocbq->cmd_flag & DSS_SECURITY_OP) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->cmd_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a BPL.
			 * This I/O has more than 3 BDE so the 1st data bde will
			 * be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
	return 0;
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS

/* Return BG_ERR_INIT if error injection is detected by Initiator */
#define BG_ERR_INIT	0x1
/* Return BG_ERR_TGT if error injection is detected by Target */
#define BG_ERR_TGT	0x2
/* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
#define BG_ERR_SWAP	0x10
/*
 * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
 * error injection
 */
#define BG_ERR_CHECK	0x20

/**
 * lpfc_bg_err_inject - Determine if we should inject an error
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @reftag: (out) BlockGuard reference tag for transmitted data
 * @apptag: (out) BlockGuard application tag for transmitted data
 * @new_guard: (in) Value to replace CRC with if needed
 *
 * Returns BG_ERR_* bit mask or 0 if request ignored
 **/
static int
lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		   uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
{
	struct scatterlist *sgpe;	/* s/g prot entry */
	struct lpfc_io_buf *lpfc_cmd = NULL;
	struct scsi_dif_tuple *src = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_rport_data *rdata;
	uint32_t op = scsi_get_prot_op(sc);
	uint32_t blksize;
	uint32_t numblks;
	u32 lba;
	int rc = 0;
	int blockoff = 0;

	if (op == SCSI_PROT_NORMAL)
		return 0;

	sgpe = scsi_prot_sglist(sc);
	lba = scsi_prot_ref_tag(sc);
	if (lba == LPFC_INVALID_REFTAG)
		return 0;

	/* First check if we need to match the LBA */
	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
		blksize = scsi_prot_interval(sc);
		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;

		/* Make sure we have the right LBA if one is specified */
		if (phba->lpfc_injerr_lba < (u64)lba ||
		    (phba->lpfc_injerr_lba >= (u64)(lba + numblks)))
			return 0;
		if (sgpe) {
			blockoff = phba->lpfc_injerr_lba - (u64)lba;
			numblks = sg_dma_len(sgpe) /
				sizeof(struct scsi_dif_tuple);
			if (numblks < blockoff)
				blockoff = numblks;
		}
	}

	/* Next check if we need to match the remote NPortID or WWPN */
	rdata = lpfc_rport_data_from_scsi_device(sc->device);
	if (rdata && rdata->pnode) {
		ndlp = rdata->pnode;

		/* Make sure we have the right NPortID if one is specified */
		if (phba->lpfc_injerr_nportid &&
		    (phba->lpfc_injerr_nportid != ndlp->nlp_DID))
			return 0;

		/*
		 * Make sure we have the right WWPN if one is specified.
		 * wwn[0] should be a non-zero NAA in a good WWPN.
		 */
		if (phba->lpfc_injerr_wwpn.u.wwn[0] &&
		    (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
			    sizeof(struct lpfc_name)) != 0))
			return 0;
	}

	/* Setup a ptr to the protection data if the SCSI host provides it */
	if (sgpe) {
		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		src += blockoff;
		lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble;
	}

	/* Should we change the Reference Tag */
	if (reftag) {
		if (phba->lpfc_injerr_wref_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */

					lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"9076 BLKGRD: Injecting reftag error: "
						"write lba x%lx + x%x oldrefTag x%x\n",
						(unsigned long)lba, blockoff,
						be32_to_cpu(src->ref_tag));

					/*
					 * Save the old ref_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_REFTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->ref_tag;
					}
					src->ref_tag = cpu_to_be32(0xDEADBEEF);
					phba->lpfc_injerr_wref_cnt--;
					if (phba->lpfc_injerr_wref_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						       0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;

					break;
				}
				fallthrough;
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the error
				 * to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEADBEEF will be the reftag on the wire */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9078 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9077 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rref_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_rref_cnt--;
				if (phba->lpfc_injerr_rref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9079 BLKGRD: Injecting reftag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}

	/* Should we change the Application Tag */
	if (apptag) {
		if (phba->lpfc_injerr_wapp_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */

					lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"9080 BLKGRD: Injecting apptag error: "
						"write lba x%lx + x%x oldappTag x%x\n",
						(unsigned long)lba, blockoff,
						be16_to_cpu(src->app_tag));

					/*
					 * Save the old app_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_APPTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->app_tag;
					}
					src->app_tag = cpu_to_be16(0xDEAD);
					phba->lpfc_injerr_wapp_cnt--;
					if (phba->lpfc_injerr_wapp_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						       0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;
					break;
				}
				fallthrough;
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEAD will be the apptag on the wire */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0813 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0812 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rapp_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_rapp_cnt--;
				if (phba->lpfc_injerr_rapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0814 BLKGRD: Injecting apptag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}

	/* Should we change the Guard Tag */
	if (new_guard) {
		if (phba->lpfc_injerr_wgrd_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				rc = BG_ERR_CHECK;
				fallthrough;

			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}

				rc |= BG_ERR_TGT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0817 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0816 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rgrd_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				phba->lpfc_injerr_rgrd_cnt--;
				if (phba->lpfc_injerr_rgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0818 BLKGRD: Injecting guard error: "
					"read lba x%lx\n", (unsigned long)lba);
			}
		}
	}

	return rc;
}
#endif

/**
 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
 * the specified SCSI command.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 *
 **/
static int
lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		      uint8_t *txop, uint8_t *rxop)
{
	uint8_t ret = 0;

	if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CRC_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_CRC;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"9063 BLKGRD: Bad op/guard:%d/IP combination\n",
				scsi_get_prot_op(sc));
			ret = 1;
			break;

		}
	} else {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CRC_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_CRC;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
				scsi_get_prot_op(sc));
			ret = 1;
			break;
		}
	}

	return ret;
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
/**
 * lpfc_bg_err_opcodes - Re-determine the BlockGuard opcodes to be used with
 * the specified SCSI command in order to force a guard tag error.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 *
 **/
static int
lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		    uint8_t *txop, uint8_t *rxop)
{

	if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CSUM_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_CSUM;
			break;

		case SCSI_PROT_NORMAL:
		default:
			break;

		}
	} else {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CSUM_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_CSUM;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			break;

		case SCSI_PROT_NORMAL:
		default:
			break;
		}
	}

	return 0;
}
#endif

/**
 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 *
 * This function sets up BPL buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into data stream (or strip DIF from
 * incoming data stream)
 *
 * The buffer list consists of just one protection group described
 * below:
 *                                +-------------------------+
 *   start of prot group  -->     |          PDE_5          |
 *                                +-------------------------+
 *                                |          PDE_6          |
 *                                +-------------------------+
 *                                |         Data BDE        |
 *                                +-------------------------+
 *                                |more Data BDE's ... (opt)|
 *                                +-------------------------+
 *
 *
 * Note: Data s/g buffers have been dma mapped
 *
 * Returns the number of BDEs added to the BPL.
 **/
static int
lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		  struct ulp_bde64 *bpl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	dma_addr_t physaddr;
	int i = 0, num_bde = 0, status;
	int datadir = sc->sc_data_direction;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	uint8_t txop, rxop;

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde */
	reftag = scsi_prot_ref_tag(sc);
	if (reftag == LPFC_INVALID_REFTAG)
		goto out;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup PDE5 with what we have */
	pde5 = (struct lpfc_pde5 *) bpl;
	memset(pde5, 0, sizeof(struct lpfc_pde5));
	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

	/* Endianness conversion if necessary for PDE5 */
	pde5->word0 = cpu_to_le32(pde5->word0);
	pde5->reftag = cpu_to_le32(reftag);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;
	pde6 = (struct lpfc_pde6 *) bpl;

	/* setup PDE6 with the rest of the info */
	memset(pde6, 0, sizeof(struct lpfc_pde6));
	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
	bf_set(pde6_optx, pde6, txop);
	bf_set(pde6_oprx, pde6, rxop);

	/*
	 * We only need to check the data on READs, for WRITEs
	 * protection data is automatically generated, not checked.
	 */
	if (datadir == DMA_FROM_DEVICE) {
		if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (sc->prot_flags & SCSI_PROT_REF_CHECK)
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);
	}
	bf_set(pde6_ai, pde6, 1);
	bf_set(pde6_ae, pde6, 0);
	bf_set(pde6_apptagval, pde6, 0);

	/* Endianness conversion if necessary for PDE6 */
	pde6->word0 = cpu_to_le32(pde6->word0);
	pde6->word1 = cpu_to_le32(pde6->word1);
	pde6->word2 = cpu_to_le32(pde6->word2);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = sg_dma_len(sgde);
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
		num_bde++;
	}

out:
	return num_bde;
}

/**
 * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segments of protection data that have been dma mapped
 *
 * This function sets up BPL buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF
 *
 * This is usually used when DIFs are in their own buffers,
 * separate from the data. The HBA can then be instructed
 * to place the DIFs in the outgoing stream.  For read operations,
 * the HBA could extract the DIFs and place them in DIF buffers.
 *
 * The buffer list for this type consists of one or more of the
 * protection groups described below:
 *                                    +-------------------------+
 *   start of first prot group  -->   |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          PDE_6          |
 *                                    +-------------------------+
 *                                    |      PDE_7 (Prot BDE)   |
 *                                    +-------------------------+
 *                                    |        Data BDE         |
 *                                    +-------------------------+
 *                                    |more Data BDE's ... (opt)|
 *                                    +-------------------------+
 *   start of new prot group  -->     |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          ...            |
 *                                    +-------------------------+
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       mapped for DMA
 *
 * Returns the number of BDEs added to the BPL.
 **/
static int
lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		       struct ulp_bde64 *bpl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	struct lpfc_pde7 *pde7 = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	int datadir = sc->sc_data_direction;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	uint8_t txop, rxop;
	int num_bde = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"9020 Invalid s/g entry: data=x%px prot=x%px\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = scsi_prot_interval(sc);
	reftag = scsi_prot_ref_tag(sc);
	if (reftag == LPFC_INVALID_REFTAG)
		goto out;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	do {
		/* Check to see if we ran out of space */
		if (num_bde >= (phba->cfg_total_seg_cnt - 2))
			return num_bde + 3;

		/* setup PDE5 with what we have */
		pde5 = (struct lpfc_pde5 *) bpl;
		memset(pde5, 0, sizeof(struct lpfc_pde5));
		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

		/*
		 * Endianness conversion if
necessary for PDE5 */ 1807 pde5->word0 = cpu_to_le32(pde5->word0); 1808 pde5->reftag = cpu_to_le32(reftag); 1809 1810 /* advance bpl and increment bde count */ 1811 num_bde++; 1812 bpl++; 1813 pde6 = (struct lpfc_pde6 *) bpl; 1814 1815 /* setup PDE6 with the rest of the info */ 1816 memset(pde6, 0, sizeof(struct lpfc_pde6)); 1817 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); 1818 bf_set(pde6_optx, pde6, txop); 1819 bf_set(pde6_oprx, pde6, rxop); 1820 1821 if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) 1822 bf_set(pde6_ce, pde6, checking); 1823 else 1824 bf_set(pde6_ce, pde6, 0); 1825 1826 if (sc->prot_flags & SCSI_PROT_REF_CHECK) 1827 bf_set(pde6_re, pde6, checking); 1828 else 1829 bf_set(pde6_re, pde6, 0); 1830 1831 bf_set(pde6_ai, pde6, 1); 1832 bf_set(pde6_ae, pde6, 0); 1833 bf_set(pde6_apptagval, pde6, 0); 1834 1835 /* Endianness conversion if necessary for PDE6 */ 1836 pde6->word0 = cpu_to_le32(pde6->word0); 1837 pde6->word1 = cpu_to_le32(pde6->word1); 1838 pde6->word2 = cpu_to_le32(pde6->word2); 1839 1840 /* advance bpl and increment bde count */ 1841 num_bde++; 1842 bpl++; 1843 1844 /* setup the first BDE that points to protection buffer */ 1845 protphysaddr = sg_dma_address(sgpe) + protgroup_offset; 1846 protgroup_len = sg_dma_len(sgpe) - protgroup_offset; 1847 1848 /* must be integer multiple of the DIF block length */ 1849 BUG_ON(protgroup_len % 8); 1850 1851 pde7 = (struct lpfc_pde7 *) bpl; 1852 memset(pde7, 0, sizeof(struct lpfc_pde7)); 1853 bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR); 1854 1855 pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr)); 1856 pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr)); 1857 1858 protgrp_blks = protgroup_len / 8; 1859 protgrp_bytes = protgrp_blks * blksize; 1860 1861 /* check if this pde is crossing the 4K boundary; if so split */ 1862 if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) { 1863 protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff); 1864 protgroup_offset += protgroup_remainder; 1865 protgrp_blks = protgroup_remainder / 8; 1866 protgrp_bytes = protgrp_blks * blksize; 1867 } else { 1868 protgroup_offset = 0; 1869 curr_prot++; 1870 } 1871 1872 num_bde++; 1873 1874 /* setup BDE's for data blocks associated with DIF data */ 1875 pgdone = 0; 1876 subtotal = 0; /* total bytes processed for current prot grp */ 1877 while (!pgdone) { 1878 /* Check to see if we ran out of space */ 1879 if (num_bde >= phba->cfg_total_seg_cnt) 1880 return num_bde + 1; 1881 1882 if (!sgde) { 1883 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1884 "9065 BLKGRD:%s Invalid data segment\n", 1885 __func__); 1886 return 0; 1887 } 1888 bpl++; 1889 dataphysaddr = sg_dma_address(sgde) + split_offset; 1890 bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr)); 1891 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr)); 1892 1893 remainder = sg_dma_len(sgde) - split_offset; 1894 1895 if ((subtotal + remainder) <= protgrp_bytes) { 1896 /* we can use this whole buffer */ 1897 bpl->tus.f.bdeSize = remainder; 1898 split_offset = 0; 1899 1900 if ((subtotal + remainder) == protgrp_bytes) 1901 pgdone = 1; 1902 } else { 1903 /* must split this buffer with next prot grp */ 1904 bpl->tus.f.bdeSize = protgrp_bytes - subtotal; 1905 split_offset += bpl->tus.f.bdeSize; 1906 } 1907 1908 subtotal += bpl->tus.f.bdeSize; 1909 1910 if (datadir == DMA_TO_DEVICE) 1911 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1912 else 1913 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; 1914 bpl->tus.w = le32_to_cpu(bpl->tus.w); 1915 1916 num_bde++; 1917 curr_data++; 1918 1919 if (split_offset) 
1920 break; 1921 1922 /* Move to the next s/g segment if possible */ 1923 sgde = sg_next(sgde); 1924 1925 } 1926 1927 if (protgroup_offset) { 1928 /* update the reference tag */ 1929 reftag += protgrp_blks; 1930 bpl++; 1931 continue; 1932 } 1933 1934 /* are we done ? */ 1935 if (curr_prot == protcnt) { 1936 alldone = 1; 1937 } else if (curr_prot < protcnt) { 1938 /* advance to next prot buffer */ 1939 sgpe = sg_next(sgpe); 1940 bpl++; 1941 1942 /* update the reference tag */ 1943 reftag += protgrp_blks; 1944 } else { 1945 /* if we're here, we have a bug */ 1946 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1947 "9054 BLKGRD: bug in %s\n", __func__); 1948 } 1949 1950 } while (!alldone); 1951 out: 1952 1953 return num_bde; 1954 } 1955 1956 /** 1957 * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data 1958 * @phba: The Hba for which this call is being executed. 1959 * @sc: pointer to scsi command we're working on 1960 * @sgl: pointer to buffer list for protection groups 1961 * @datasegcnt: number of segments of data that have been dma mapped 1962 * @lpfc_cmd: lpfc scsi command object pointer. 1963 * 1964 * This function sets up SGL buffer list for protection groups of 1965 * type LPFC_PG_TYPE_NO_DIF 1966 * 1967 * This is usually used when the HBA is instructed to generate 1968 * DIFs and insert them into data stream (or strip DIF from 1969 * incoming data stream) 1970 * 1971 * The buffer list consists of just one protection group described 1972 * below: 1973 * +-------------------------+ 1974 * start of prot group --> | DI_SEED | 1975 * +-------------------------+ 1976 * | Data SGE | 1977 * +-------------------------+ 1978 * |more Data SGE's ... (opt)| 1979 * +-------------------------+ 1980 * 1981 * 1982 * Note: Data s/g buffers have been dma mapped 1983 * 1984 * Returns the number of SGEs added to the SGL. 1985 **/ 1986 static int 1987 lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1988 struct sli4_sge *sgl, int datasegcnt, 1989 struct lpfc_io_buf *lpfc_cmd) 1990 { 1991 struct scatterlist *sgde = NULL; /* s/g data entry */ 1992 struct sli4_sge_diseed *diseed = NULL; 1993 dma_addr_t physaddr; 1994 int i = 0, num_sge = 0, status; 1995 uint32_t reftag; 1996 uint8_t txop, rxop; 1997 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1998 uint32_t rc; 1999 #endif 2000 uint32_t checking = 1; 2001 uint32_t dma_len; 2002 uint32_t dma_offset = 0; 2003 struct sli4_hybrid_sgl *sgl_xtra = NULL; 2004 int j; 2005 bool lsp_just_set = false; 2006 2007 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); 2008 if (status) 2009 goto out; 2010 2011 /* extract some info from the scsi command for pde*/ 2012 reftag = scsi_prot_ref_tag(sc); 2013 if (reftag == LPFC_INVALID_REFTAG) 2014 goto out; 2015 2016 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 2017 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); 2018 if (rc) { 2019 if (rc & BG_ERR_SWAP) 2020 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); 2021 if (rc & BG_ERR_CHECK) 2022 checking = 0; 2023 } 2024 #endif 2025 2026 /* setup DISEED with what we have */ 2027 diseed = (struct sli4_sge_diseed *) sgl; 2028 memset(diseed, 0, sizeof(struct sli4_sge_diseed)); 2029 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED); 2030 2031 /* Endianness conversion if necessary */ 2032 diseed->ref_tag = cpu_to_le32(reftag); 2033 diseed->ref_tag_tran = diseed->ref_tag; 2034 2035 /* 2036 * We only need to check the data on READs, for WRITEs 2037 * protection data is automatically generated, not checked. 
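         *
         * For example (illustrative): a SCSI_PROT_READ_STRIP request with
         * SCSI_PROT_GUARD_CHECK and SCSI_PROT_REF_CHECK set ends up with
         * dif_ce = 1 and dif_re = 1 in the DISEED (unless debugfs error
         * injection cleared "checking"), while a SCSI_PROT_WRITE_INSERT
         * request leaves both check bits clear because the HBA generates
         * the DIF itself.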
2038 */ 2039 if (sc->sc_data_direction == DMA_FROM_DEVICE) { 2040 if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) 2041 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); 2042 else 2043 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); 2044 2045 if (sc->prot_flags & SCSI_PROT_REF_CHECK) 2046 bf_set(lpfc_sli4_sge_dif_re, diseed, checking); 2047 else 2048 bf_set(lpfc_sli4_sge_dif_re, diseed, 0); 2049 } 2050 2051 /* setup DISEED with the rest of the info */ 2052 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); 2053 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); 2054 2055 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); 2056 bf_set(lpfc_sli4_sge_dif_me, diseed, 0); 2057 2058 /* Endianness conversion if necessary for DISEED */ 2059 diseed->word2 = cpu_to_le32(diseed->word2); 2060 diseed->word3 = cpu_to_le32(diseed->word3); 2061 2062 /* advance bpl and increment sge count */ 2063 num_sge++; 2064 sgl++; 2065 2066 /* assumption: caller has already run dma_map_sg on command data */ 2067 sgde = scsi_sglist(sc); 2068 j = 3; 2069 for (i = 0; i < datasegcnt; i++) { 2070 /* clear it */ 2071 sgl->word2 = 0; 2072 2073 /* do we need to expand the segment */ 2074 if (!lsp_just_set && !((j + 1) % phba->border_sge_num) && 2075 ((datasegcnt - 1) != i)) { 2076 /* set LSP type */ 2077 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP); 2078 2079 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd); 2080 2081 if (unlikely(!sgl_xtra)) { 2082 lpfc_cmd->seg_cnt = 0; 2083 return 0; 2084 } 2085 sgl->addr_lo = cpu_to_le32(putPaddrLow( 2086 sgl_xtra->dma_phys_sgl)); 2087 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 2088 sgl_xtra->dma_phys_sgl)); 2089 2090 } else { 2091 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); 2092 } 2093 2094 if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) { 2095 if ((datasegcnt - 1) == i) 2096 bf_set(lpfc_sli4_sge_last, sgl, 1); 2097 physaddr = sg_dma_address(sgde); 2098 dma_len = sg_dma_len(sgde); 2099 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); 2100 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); 2101 2102 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 2103 sgl->word2 = cpu_to_le32(sgl->word2); 2104 sgl->sge_len = cpu_to_le32(dma_len); 2105 2106 dma_offset += dma_len; 2107 sgde = sg_next(sgde); 2108 2109 sgl++; 2110 num_sge++; 2111 lsp_just_set = false; 2112 2113 } else { 2114 sgl->word2 = cpu_to_le32(sgl->word2); 2115 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size); 2116 2117 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 2118 i = i - 1; 2119 2120 lsp_just_set = true; 2121 } 2122 2123 j++; 2124 2125 } 2126 2127 out: 2128 return num_sge; 2129 } 2130 2131 /** 2132 * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data 2133 * @phba: The Hba for which this call is being executed. 2134 * @sc: pointer to scsi command we're working on 2135 * @sgl: pointer to buffer list for protection groups 2136 * @datacnt: number of segments of data that have been dma mapped 2137 * @protcnt: number of segment of protection data that have been dma mapped 2138 * @lpfc_cmd: lpfc scsi command object pointer. 2139 * 2140 * This function sets up SGL buffer list for protection groups of 2141 * type LPFC_PG_TYPE_DIF 2142 * 2143 * This is usually used when DIFs are in their own buffers, 2144 * separate from the data. The HBA can then by instructed 2145 * to place the DIFs in the outgoing stream. For read operations, 2146 * The HBA could extract the DIFs and place it in DIF buffers. 
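 *
 * As a worked sizing example (hypothetical numbers): with a 512 byte
 * protection interval, a 4096 byte protection buffer holds 4096 / 8 = 512
 * DIF tuples, so a single protection group can cover 512 * 512 = 256 KB of
 * data, provided the DIF buffer does not cross a 4 KB boundary (if it does,
 * the group is split at that boundary as the code below shows).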
2147 * 2148 * The buffer list for this type consists of one or more of the 2149 * protection groups described below: 2150 * +-------------------------+ 2151 * start of first prot group --> | DISEED | 2152 * +-------------------------+ 2153 * | DIF (Prot SGE) | 2154 * +-------------------------+ 2155 * | Data SGE | 2156 * +-------------------------+ 2157 * |more Data SGE's ... (opt)| 2158 * +-------------------------+ 2159 * start of new prot group --> | DISEED | 2160 * +-------------------------+ 2161 * | ... | 2162 * +-------------------------+ 2163 * 2164 * Note: It is assumed that both data and protection s/g buffers have been 2165 * mapped for DMA 2166 * 2167 * Returns the number of SGEs added to the SGL. 2168 **/ 2169 static int 2170 lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, 2171 struct sli4_sge *sgl, int datacnt, int protcnt, 2172 struct lpfc_io_buf *lpfc_cmd) 2173 { 2174 struct scatterlist *sgde = NULL; /* s/g data entry */ 2175 struct scatterlist *sgpe = NULL; /* s/g prot entry */ 2176 struct sli4_sge_diseed *diseed = NULL; 2177 dma_addr_t dataphysaddr, protphysaddr; 2178 unsigned short curr_data = 0, curr_prot = 0; 2179 unsigned int split_offset; 2180 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder; 2181 unsigned int protgrp_blks, protgrp_bytes; 2182 unsigned int remainder, subtotal; 2183 int status; 2184 unsigned char pgdone = 0, alldone = 0; 2185 unsigned blksize; 2186 uint32_t reftag; 2187 uint8_t txop, rxop; 2188 uint32_t dma_len; 2189 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 2190 uint32_t rc; 2191 #endif 2192 uint32_t checking = 1; 2193 uint32_t dma_offset = 0; 2194 int num_sge = 0, j = 2; 2195 struct sli4_hybrid_sgl *sgl_xtra = NULL; 2196 2197 sgpe = scsi_prot_sglist(sc); 2198 sgde = scsi_sglist(sc); 2199 2200 if (!sgpe || !sgde) { 2201 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2202 "9082 Invalid s/g entry: data=x%px prot=x%px\n", 2203 sgpe, sgde); 2204 return 0; 2205 } 2206 2207 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); 2208 if (status) 2209 goto out; 2210 2211 /* extract some info from the scsi command */ 2212 blksize = scsi_prot_interval(sc); 2213 reftag = scsi_prot_ref_tag(sc); 2214 if (reftag == LPFC_INVALID_REFTAG) 2215 goto out; 2216 2217 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 2218 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); 2219 if (rc) { 2220 if (rc & BG_ERR_SWAP) 2221 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); 2222 if (rc & BG_ERR_CHECK) 2223 checking = 0; 2224 } 2225 #endif 2226 2227 split_offset = 0; 2228 do { 2229 /* Check to see if we ran out of space */ 2230 if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) && 2231 !(phba->cfg_xpsgl)) 2232 return num_sge + 3; 2233 2234 /* DISEED and DIF have to be together */ 2235 if (!((j + 1) % phba->border_sge_num) || 2236 !((j + 2) % phba->border_sge_num) || 2237 !((j + 3) % phba->border_sge_num)) { 2238 sgl->word2 = 0; 2239 2240 /* set LSP type */ 2241 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP); 2242 2243 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd); 2244 2245 if (unlikely(!sgl_xtra)) { 2246 goto out; 2247 } else { 2248 sgl->addr_lo = cpu_to_le32(putPaddrLow( 2249 sgl_xtra->dma_phys_sgl)); 2250 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 2251 sgl_xtra->dma_phys_sgl)); 2252 } 2253 2254 sgl->word2 = cpu_to_le32(sgl->word2); 2255 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size); 2256 2257 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 2258 j = 0; 2259 } 2260 2261 /* setup DISEED with what we have */ 2262 diseed = (struct sli4_sge_diseed *) sgl; 2263 
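                /*
                 * Note: the DISEED SGE is the per-protection-group control
                 * entry; it carries the expected reference tag and the
                 * guard/ref check enables that the HBA applies to the
                 * following DIF and data SGEs.
                 */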
memset(diseed, 0, sizeof(struct sli4_sge_diseed)); 2264 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED); 2265 2266 /* Endianness conversion if necessary */ 2267 diseed->ref_tag = cpu_to_le32(reftag); 2268 diseed->ref_tag_tran = diseed->ref_tag; 2269 2270 if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) { 2271 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); 2272 } else { 2273 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); 2274 /* 2275 * When in this mode, the hardware will replace 2276 * the guard tag from the host with a 2277 * newly generated good CRC for the wire. 2278 * Switch to raw mode here to avoid this 2279 * behavior. What the host sends gets put on the wire. 2280 */ 2281 if (txop == BG_OP_IN_CRC_OUT_CRC) { 2282 txop = BG_OP_RAW_MODE; 2283 rxop = BG_OP_RAW_MODE; 2284 } 2285 } 2286 2287 2288 if (sc->prot_flags & SCSI_PROT_REF_CHECK) 2289 bf_set(lpfc_sli4_sge_dif_re, diseed, checking); 2290 else 2291 bf_set(lpfc_sli4_sge_dif_re, diseed, 0); 2292 2293 /* setup DISEED with the rest of the info */ 2294 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); 2295 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); 2296 2297 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); 2298 bf_set(lpfc_sli4_sge_dif_me, diseed, 0); 2299 2300 /* Endianness conversion if necessary for DISEED */ 2301 diseed->word2 = cpu_to_le32(diseed->word2); 2302 diseed->word3 = cpu_to_le32(diseed->word3); 2303 2304 /* advance sgl and increment bde count */ 2305 num_sge++; 2306 2307 sgl++; 2308 j++; 2309 2310 /* setup the first BDE that points to protection buffer */ 2311 protphysaddr = sg_dma_address(sgpe) + protgroup_offset; 2312 protgroup_len = sg_dma_len(sgpe) - protgroup_offset; 2313 2314 /* must be integer multiple of the DIF block length */ 2315 BUG_ON(protgroup_len % 8); 2316 2317 /* Now setup DIF SGE */ 2318 sgl->word2 = 0; 2319 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF); 2320 sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr)); 2321 sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr)); 2322 sgl->word2 = cpu_to_le32(sgl->word2); 2323 sgl->sge_len = 0; 2324 2325 protgrp_blks = protgroup_len / 8; 2326 protgrp_bytes = protgrp_blks * blksize; 2327 2328 /* check if DIF SGE is crossing the 4K boundary; if so split */ 2329 if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) { 2330 protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff); 2331 protgroup_offset += protgroup_remainder; 2332 protgrp_blks = protgroup_remainder / 8; 2333 protgrp_bytes = protgrp_blks * blksize; 2334 } else { 2335 protgroup_offset = 0; 2336 curr_prot++; 2337 } 2338 2339 num_sge++; 2340 2341 /* setup SGE's for data blocks associated with DIF data */ 2342 pgdone = 0; 2343 subtotal = 0; /* total bytes processed for current prot grp */ 2344 2345 sgl++; 2346 j++; 2347 2348 while (!pgdone) { 2349 /* Check to see if we ran out of space */ 2350 if ((num_sge >= phba->cfg_total_seg_cnt) && 2351 !phba->cfg_xpsgl) 2352 return num_sge + 1; 2353 2354 if (!sgde) { 2355 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2356 "9086 BLKGRD:%s Invalid data segment\n", 2357 __func__); 2358 return 0; 2359 } 2360 2361 if (!((j + 1) % phba->border_sge_num)) { 2362 sgl->word2 = 0; 2363 2364 /* set LSP type */ 2365 bf_set(lpfc_sli4_sge_type, sgl, 2366 LPFC_SGE_TYPE_LSP); 2367 2368 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, 2369 lpfc_cmd); 2370 2371 if (unlikely(!sgl_xtra)) { 2372 goto out; 2373 } else { 2374 sgl->addr_lo = cpu_to_le32( 2375 putPaddrLow(sgl_xtra->dma_phys_sgl)); 2376 sgl->addr_hi = cpu_to_le32( 2377 putPaddrHigh(sgl_xtra->dma_phys_sgl)); 2378 } 2379 2380 sgl->word2 = 
cpu_to_le32(sgl->word2); 2381 sgl->sge_len = cpu_to_le32( 2382 phba->cfg_sg_dma_buf_size); 2383 2384 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 2385 } else { 2386 dataphysaddr = sg_dma_address(sgde) + 2387 split_offset; 2388 2389 remainder = sg_dma_len(sgde) - split_offset; 2390 2391 if ((subtotal + remainder) <= protgrp_bytes) { 2392 /* we can use this whole buffer */ 2393 dma_len = remainder; 2394 split_offset = 0; 2395 2396 if ((subtotal + remainder) == 2397 protgrp_bytes) 2398 pgdone = 1; 2399 } else { 2400 /* must split this buffer with next 2401 * prot grp 2402 */ 2403 dma_len = protgrp_bytes - subtotal; 2404 split_offset += dma_len; 2405 } 2406 2407 subtotal += dma_len; 2408 2409 sgl->word2 = 0; 2410 sgl->addr_lo = cpu_to_le32(putPaddrLow( 2411 dataphysaddr)); 2412 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 2413 dataphysaddr)); 2414 bf_set(lpfc_sli4_sge_last, sgl, 0); 2415 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 2416 bf_set(lpfc_sli4_sge_type, sgl, 2417 LPFC_SGE_TYPE_DATA); 2418 2419 sgl->sge_len = cpu_to_le32(dma_len); 2420 dma_offset += dma_len; 2421 2422 num_sge++; 2423 curr_data++; 2424 2425 if (split_offset) { 2426 sgl++; 2427 j++; 2428 break; 2429 } 2430 2431 /* Move to the next s/g segment if possible */ 2432 sgde = sg_next(sgde); 2433 2434 sgl++; 2435 } 2436 2437 j++; 2438 } 2439 2440 if (protgroup_offset) { 2441 /* update the reference tag */ 2442 reftag += protgrp_blks; 2443 continue; 2444 } 2445 2446 /* are we done ? */ 2447 if (curr_prot == protcnt) { 2448 /* mark the last SGL */ 2449 sgl--; 2450 bf_set(lpfc_sli4_sge_last, sgl, 1); 2451 alldone = 1; 2452 } else if (curr_prot < protcnt) { 2453 /* advance to next prot buffer */ 2454 sgpe = sg_next(sgpe); 2455 2456 /* update the reference tag */ 2457 reftag += protgrp_blks; 2458 } else { 2459 /* if we're here, we have a bug */ 2460 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2461 "9085 BLKGRD: bug in %s\n", __func__); 2462 } 2463 2464 } while (!alldone); 2465 2466 out: 2467 2468 return num_sge; 2469 } 2470 2471 /** 2472 * lpfc_prot_group_type - Get prtotection group type of SCSI command 2473 * @phba: The Hba for which this call is being executed. 2474 * @sc: pointer to scsi command we're working on 2475 * 2476 * Given a SCSI command that supports DIF, determine composition of protection 2477 * groups involved in setting up buffer lists 2478 * 2479 * Returns: Protection group type (with or without DIF) 2480 * 2481 **/ 2482 static int 2483 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc) 2484 { 2485 int ret = LPFC_PG_TYPE_INVALID; 2486 unsigned char op = scsi_get_prot_op(sc); 2487 2488 switch (op) { 2489 case SCSI_PROT_READ_STRIP: 2490 case SCSI_PROT_WRITE_INSERT: 2491 ret = LPFC_PG_TYPE_NO_DIF; 2492 break; 2493 case SCSI_PROT_READ_INSERT: 2494 case SCSI_PROT_WRITE_STRIP: 2495 case SCSI_PROT_READ_PASS: 2496 case SCSI_PROT_WRITE_PASS: 2497 ret = LPFC_PG_TYPE_DIF_BUF; 2498 break; 2499 default: 2500 if (phba) 2501 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2502 "9021 Unsupported protection op:%d\n", 2503 op); 2504 break; 2505 } 2506 return ret; 2507 } 2508 2509 /** 2510 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard 2511 * @phba: The Hba for which this call is being executed. 2512 * @lpfc_cmd: The scsi buffer which is going to be adjusted. 2513 * 2514 * Adjust the data length to account for how much data 2515 * is actually on the wire. 
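 *
 * For example (hypothetical values): a 4096 byte transfer with a 512 byte
 * protection interval carries 4096 / 512 = 8 DIF tuples of 8 bytes each,
 * so the length on the wire becomes 4096 + 64 = 4160 bytes whenever the
 * protection data actually travels with the payload.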
 *
 * returns the adjusted data length
 **/
static int
lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
                       struct lpfc_io_buf *lpfc_cmd)
{
        struct scsi_cmnd *sc = lpfc_cmd->pCmd;
        int fcpdl;

        fcpdl = scsi_bufflen(sc);

        /* Check if there is protection data on the wire */
        if (sc->sc_data_direction == DMA_FROM_DEVICE) {
                /* Read check for protection data */
                if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
                        return fcpdl;

        } else {
                /* Write check for protection data */
                if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
                        return fcpdl;
        }

        /*
         * If we are in DIF Type 1 mode every data block has an 8 byte
         * DIF (trailer) attached to it. Must adjust FCP data length
         * to account for the protection data.
         */
        fcpdl += (fcpdl / scsi_prot_interval(sc)) * 8;

        return fcpdl;
}

/**
 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
 *
 * This is the protection/DIF aware version of
 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
 * two functions eventually, but for now, it's here.
 * RETURNS 0 - SUCCESS,
 *         1 - Failed DMA map, retry.
 *         2 - Invalid scsi cmd or prot-type. Do not retry.
 **/
static int
lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
                             struct lpfc_io_buf *lpfc_cmd)
{
        struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
        struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
        struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
        IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
        uint32_t num_bde = 0;
        int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
        int prot_group_type = 0;
        int fcpdl;
        int ret = 1;
        struct lpfc_vport *vport = phba->pport;

        /*
         * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
         * fcp_rsp regions to the first data bde entry
         */
        bpl += 2;
        if (scsi_sg_count(scsi_cmnd)) {
                /*
                 * The driver stores the segment count returned from dma_map_sg
                 * because this is a count of dma-mappings used to map the
                 * use_sg pages. They are not guaranteed to be the same for
                 * those architectures that implement an IOMMU.
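                 *
                 * For example, an IOMMU may coalesce an 8 element
                 * scatterlist into fewer contiguous DMA segments, so the
                 * count returned by dma_map_sg (rather than use_sg) is
                 * what must drive the BDE setup below.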
2588 */ 2589 datasegcnt = dma_map_sg(&phba->pcidev->dev, 2590 scsi_sglist(scsi_cmnd), 2591 scsi_sg_count(scsi_cmnd), datadir); 2592 if (unlikely(!datasegcnt)) 2593 return 1; 2594 2595 lpfc_cmd->seg_cnt = datasegcnt; 2596 2597 /* First check if data segment count from SCSI Layer is good */ 2598 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 2599 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); 2600 ret = 2; 2601 goto err; 2602 } 2603 2604 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); 2605 2606 switch (prot_group_type) { 2607 case LPFC_PG_TYPE_NO_DIF: 2608 2609 /* Here we need to add a PDE5 and PDE6 to the count */ 2610 if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) { 2611 ret = 2; 2612 goto err; 2613 } 2614 2615 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl, 2616 datasegcnt); 2617 /* we should have 2 or more entries in buffer list */ 2618 if (num_bde < 2) { 2619 ret = 2; 2620 goto err; 2621 } 2622 break; 2623 2624 case LPFC_PG_TYPE_DIF_BUF: 2625 /* 2626 * This type indicates that protection buffers are 2627 * passed to the driver, so that needs to be prepared 2628 * for DMA 2629 */ 2630 protsegcnt = dma_map_sg(&phba->pcidev->dev, 2631 scsi_prot_sglist(scsi_cmnd), 2632 scsi_prot_sg_count(scsi_cmnd), datadir); 2633 if (unlikely(!protsegcnt)) { 2634 scsi_dma_unmap(scsi_cmnd); 2635 return 1; 2636 } 2637 2638 lpfc_cmd->prot_seg_cnt = protsegcnt; 2639 2640 /* 2641 * There is a minimun of 4 BPLs used for every 2642 * protection data segment. 2643 */ 2644 if ((lpfc_cmd->prot_seg_cnt * 4) > 2645 (phba->cfg_total_seg_cnt - 2)) { 2646 ret = 2; 2647 goto err; 2648 } 2649 2650 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl, 2651 datasegcnt, protsegcnt); 2652 /* we should have 3 or more entries in buffer list */ 2653 if ((num_bde < 3) || 2654 (num_bde > phba->cfg_total_seg_cnt)) { 2655 ret = 2; 2656 goto err; 2657 } 2658 break; 2659 2660 case LPFC_PG_TYPE_INVALID: 2661 default: 2662 scsi_dma_unmap(scsi_cmnd); 2663 lpfc_cmd->seg_cnt = 0; 2664 2665 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2666 "9022 Unexpected protection group %i\n", 2667 prot_group_type); 2668 return 2; 2669 } 2670 } 2671 2672 /* 2673 * Finish initializing those IOCB fields that are dependent on the 2674 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly 2675 * reinitialized since all iocb memory resources are used many times 2676 * for transmit, receive, and continuation bpl's. 
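         *
         * Illustrative arithmetic (assuming the usual 12 byte
         * struct ulp_bde64): the BPL always carries the FCP_CMND and
         * FCP_RSP BDEs plus the num_bde entries built above, so e.g.
         * num_bde = 3 gives bdl.bdeSize = (2 + 3) * 12 = 60 bytes.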
 */
        iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
        iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
        iocb_cmd->ulpBdeCount = 1;
        iocb_cmd->ulpLe = 1;

        fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
        fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);

        /*
         * Due to difference in data length between DIF/non-DIF paths,
         * we need to set word 4 of IOCB here
         */
        iocb_cmd->un.fcpi.fcpi_parm = fcpdl;

        /*
         * For First burst, we may need to adjust the initial transfer
         * length for DIF
         */
        if (iocb_cmd->un.fcpi.fcpi_XRdy &&
            (fcpdl < vport->cfg_first_burst_size))
                iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;

        return 0;
err:
        if (lpfc_cmd->seg_cnt)
                scsi_dma_unmap(scsi_cmnd);
        if (lpfc_cmd->prot_seg_cnt)
                dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
                             scsi_prot_sg_count(scsi_cmnd),
                             scsi_cmnd->sc_data_direction);

        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                        "9023 Cannot setup S/G List for HBA "
                        "IO segs %d/%d BPL %d SCSI %d: %d %d\n",
                        lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
                        phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
                        prot_group_type, num_bde);

        lpfc_cmd->seg_cnt = 0;
        lpfc_cmd->prot_seg_cnt = 0;
        return ret;
}

/*
 * This function calculates the T10 DIF guard tag
 * on the specified data using a CRC algorithm
 * via crc_t10dif.
 */
static uint16_t
lpfc_bg_crc(uint8_t *data, int count)
{
        uint16_t crc = 0;
        uint16_t x;

        crc = crc_t10dif(data, count);
        x = cpu_to_be16(crc);
        return x;
}

/*
 * This function calculates the T10 DIF guard tag
 * on the specified data using a CSUM algorithm
 * via ip_compute_csum.
 */
static uint16_t
lpfc_bg_csum(uint8_t *data, int count)
{
        uint16_t ret;

        ret = ip_compute_csum(data, count);
        return ret;
}

/*
 * This function examines the protection data to try to determine
 * what type of T10-DIF error occurred.
 */
static void
lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
        struct scatterlist *sgpe; /* s/g prot entry */
        struct scatterlist *sgde; /* s/g data entry */
        struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
        struct scsi_dif_tuple *src = NULL;
        uint8_t *data_src = NULL;
        uint16_t guard_tag;
        uint16_t start_app_tag, app_tag;
        uint32_t start_ref_tag, ref_tag;
        int prot, protsegcnt;
        int err_type, len, data_len;
        int chk_ref, chk_app, chk_guard;
        uint16_t sum;
        unsigned blksize;

        err_type = BGS_GUARD_ERR_MASK;
        sum = 0;
        guard_tag = 0;

        /* First check to see if there is protection data to examine */
        prot = scsi_get_prot_op(cmd);
        if ((prot == SCSI_PROT_READ_STRIP) ||
            (prot == SCSI_PROT_WRITE_INSERT) ||
            (prot == SCSI_PROT_NORMAL))
                goto out;

        /* Currently the driver just supports ref_tag and guard_tag checking */
        chk_ref = 1;
        chk_app = 0;
        chk_guard = 0;

        /* Setup a ptr to the protection data provided by the SCSI host */
        sgpe = scsi_prot_sglist(cmd);
        protsegcnt = lpfc_cmd->prot_seg_cnt;

        if (sgpe && protsegcnt) {

                /*
                 * We will only try to verify guard tag if the segment
                 * data length is a multiple of the blksize.
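                 *
                 * For example, with a 512 byte interval a 4096 byte data
                 * segment can be guard-checked, while a 1000 byte segment
                 * cannot, so chk_guard is left clear for it.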
2797 */ 2798 sgde = scsi_sglist(cmd); 2799 blksize = scsi_prot_interval(cmd); 2800 data_src = (uint8_t *)sg_virt(sgde); 2801 data_len = sgde->length; 2802 if ((data_len & (blksize - 1)) == 0) 2803 chk_guard = 1; 2804 2805 src = (struct scsi_dif_tuple *)sg_virt(sgpe); 2806 start_ref_tag = scsi_prot_ref_tag(cmd); 2807 if (start_ref_tag == LPFC_INVALID_REFTAG) 2808 goto out; 2809 start_app_tag = src->app_tag; 2810 len = sgpe->length; 2811 while (src && protsegcnt) { 2812 while (len) { 2813 2814 /* 2815 * First check to see if a protection data 2816 * check is valid 2817 */ 2818 if ((src->ref_tag == T10_PI_REF_ESCAPE) || 2819 (src->app_tag == T10_PI_APP_ESCAPE)) { 2820 start_ref_tag++; 2821 goto skipit; 2822 } 2823 2824 /* First Guard Tag checking */ 2825 if (chk_guard) { 2826 guard_tag = src->guard_tag; 2827 if (cmd->prot_flags 2828 & SCSI_PROT_IP_CHECKSUM) 2829 sum = lpfc_bg_csum(data_src, 2830 blksize); 2831 else 2832 sum = lpfc_bg_crc(data_src, 2833 blksize); 2834 if ((guard_tag != sum)) { 2835 err_type = BGS_GUARD_ERR_MASK; 2836 goto out; 2837 } 2838 } 2839 2840 /* Reference Tag checking */ 2841 ref_tag = be32_to_cpu(src->ref_tag); 2842 if (chk_ref && (ref_tag != start_ref_tag)) { 2843 err_type = BGS_REFTAG_ERR_MASK; 2844 goto out; 2845 } 2846 start_ref_tag++; 2847 2848 /* App Tag checking */ 2849 app_tag = src->app_tag; 2850 if (chk_app && (app_tag != start_app_tag)) { 2851 err_type = BGS_APPTAG_ERR_MASK; 2852 goto out; 2853 } 2854 skipit: 2855 len -= sizeof(struct scsi_dif_tuple); 2856 if (len < 0) 2857 len = 0; 2858 src++; 2859 2860 data_src += blksize; 2861 data_len -= blksize; 2862 2863 /* 2864 * Are we at the end of the Data segment? 2865 * The data segment is only used for Guard 2866 * tag checking. 2867 */ 2868 if (chk_guard && (data_len == 0)) { 2869 chk_guard = 0; 2870 sgde = sg_next(sgde); 2871 if (!sgde) 2872 goto out; 2873 2874 data_src = (uint8_t *)sg_virt(sgde); 2875 data_len = sgde->length; 2876 if ((data_len & (blksize - 1)) == 0) 2877 chk_guard = 1; 2878 } 2879 } 2880 2881 /* Goto the next Protection data segment */ 2882 sgpe = sg_next(sgpe); 2883 if (sgpe) { 2884 src = (struct scsi_dif_tuple *)sg_virt(sgpe); 2885 len = sgpe->length; 2886 } else { 2887 src = NULL; 2888 } 2889 protsegcnt--; 2890 } 2891 } 2892 out: 2893 if (err_type == BGS_GUARD_ERR_MASK) { 2894 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1); 2895 set_host_byte(cmd, DID_ABORT); 2896 phba->bg_guard_err_cnt++; 2897 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2898 "9069 BLKGRD: reftag %x grd_tag err %x != %x\n", 2899 scsi_prot_ref_tag(cmd), 2900 sum, guard_tag); 2901 2902 } else if (err_type == BGS_REFTAG_ERR_MASK) { 2903 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3); 2904 set_host_byte(cmd, DID_ABORT); 2905 2906 phba->bg_reftag_err_cnt++; 2907 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2908 "9066 BLKGRD: reftag %x ref_tag err %x != %x\n", 2909 scsi_prot_ref_tag(cmd), 2910 ref_tag, start_ref_tag); 2911 2912 } else if (err_type == BGS_APPTAG_ERR_MASK) { 2913 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2); 2914 set_host_byte(cmd, DID_ABORT); 2915 2916 phba->bg_apptag_err_cnt++; 2917 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2918 "9041 BLKGRD: reftag %x app_tag err %x != %x\n", 2919 scsi_prot_ref_tag(cmd), 2920 app_tag, start_app_tag); 2921 } 2922 } 2923 2924 /* 2925 * This function checks for BlockGuard errors detected by 2926 * the HBA. 
In case of errors, the ASC/ASCQ fields in the 2927 * sense buffer will be set accordingly, paired with 2928 * ILLEGAL_REQUEST to signal to the kernel that the HBA 2929 * detected corruption. 2930 * 2931 * Returns: 2932 * 0 - No error found 2933 * 1 - BlockGuard error found 2934 * -1 - Internal error (bad profile, ...etc) 2935 */ 2936 static int 2937 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, 2938 struct lpfc_iocbq *pIocbOut) 2939 { 2940 struct scsi_cmnd *cmd = lpfc_cmd->pCmd; 2941 struct sli3_bg_fields *bgf; 2942 int ret = 0; 2943 struct lpfc_wcqe_complete *wcqe; 2944 u32 status; 2945 u32 bghm = 0; 2946 u32 bgstat = 0; 2947 u64 failing_sector = 0; 2948 2949 if (phba->sli_rev == LPFC_SLI_REV4) { 2950 wcqe = &pIocbOut->wcqe_cmpl; 2951 status = bf_get(lpfc_wcqe_c_status, wcqe); 2952 2953 if (status == CQE_STATUS_DI_ERROR) { 2954 /* Guard Check failed */ 2955 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) 2956 bgstat |= BGS_GUARD_ERR_MASK; 2957 2958 /* AppTag Check failed */ 2959 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) 2960 bgstat |= BGS_APPTAG_ERR_MASK; 2961 2962 /* RefTag Check failed */ 2963 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) 2964 bgstat |= BGS_REFTAG_ERR_MASK; 2965 2966 /* Check to see if there was any good data before the 2967 * error 2968 */ 2969 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) { 2970 bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK; 2971 bghm = wcqe->total_data_placed; 2972 } 2973 2974 /* 2975 * Set ALL the error bits to indicate we don't know what 2976 * type of error it is. 2977 */ 2978 if (!bgstat) 2979 bgstat |= (BGS_REFTAG_ERR_MASK | 2980 BGS_APPTAG_ERR_MASK | 2981 BGS_GUARD_ERR_MASK); 2982 } 2983 2984 } else { 2985 bgf = &pIocbOut->iocb.unsli3.sli3_bg; 2986 bghm = bgf->bghm; 2987 bgstat = bgf->bgstat; 2988 } 2989 2990 if (lpfc_bgs_get_invalid_prof(bgstat)) { 2991 cmd->result = DID_ERROR << 16; 2992 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2993 "9072 BLKGRD: Invalid BG Profile in cmd " 2994 "0x%x reftag 0x%x blk cnt 0x%x " 2995 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 2996 scsi_prot_ref_tag(cmd), 2997 scsi_logical_block_count(cmd), bgstat, bghm); 2998 ret = (-1); 2999 goto out; 3000 } 3001 3002 if (lpfc_bgs_get_uninit_dif_block(bgstat)) { 3003 cmd->result = DID_ERROR << 16; 3004 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3005 "9073 BLKGRD: Invalid BG PDIF Block in cmd " 3006 "0x%x reftag 0x%x blk cnt 0x%x " 3007 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3008 scsi_prot_ref_tag(cmd), 3009 scsi_logical_block_count(cmd), bgstat, bghm); 3010 ret = (-1); 3011 goto out; 3012 } 3013 3014 if (lpfc_bgs_get_guard_err(bgstat)) { 3015 ret = 1; 3016 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1); 3017 set_host_byte(cmd, DID_ABORT); 3018 phba->bg_guard_err_cnt++; 3019 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3020 "9055 BLKGRD: Guard Tag error in cmd " 3021 "0x%x reftag 0x%x blk cnt 0x%x " 3022 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3023 scsi_prot_ref_tag(cmd), 3024 scsi_logical_block_count(cmd), bgstat, bghm); 3025 } 3026 3027 if (lpfc_bgs_get_reftag_err(bgstat)) { 3028 ret = 1; 3029 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3); 3030 set_host_byte(cmd, DID_ABORT); 3031 phba->bg_reftag_err_cnt++; 3032 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3033 "9056 BLKGRD: Ref Tag error in cmd " 3034 "0x%x reftag 0x%x blk cnt 0x%x " 3035 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3036 scsi_prot_ref_tag(cmd), 3037 scsi_logical_block_count(cmd), bgstat, bghm); 3038 } 3039 3040 if (lpfc_bgs_get_apptag_err(bgstat)) { 3041 ret = 1; 3042 
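                /*
                 * ASC/ASCQ 0x10/0x02 is "LOGICAL BLOCK APPLICATION TAG
                 * CHECK FAILED" per SPC, mirroring the guard (0x10/0x01)
                 * and ref tag (0x10/0x03) cases above.
                 */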
scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2); 3043 set_host_byte(cmd, DID_ABORT); 3044 phba->bg_apptag_err_cnt++; 3045 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3046 "9061 BLKGRD: App Tag error in cmd " 3047 "0x%x reftag 0x%x blk cnt 0x%x " 3048 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3049 scsi_prot_ref_tag(cmd), 3050 scsi_logical_block_count(cmd), bgstat, bghm); 3051 } 3052 3053 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { 3054 /* 3055 * setup sense data descriptor 0 per SPC-4 as an information 3056 * field, and put the failing LBA in it. 3057 * This code assumes there was also a guard/app/ref tag error 3058 * indication. 3059 */ 3060 cmd->sense_buffer[7] = 0xc; /* Additional sense length */ 3061 cmd->sense_buffer[8] = 0; /* Information descriptor type */ 3062 cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */ 3063 cmd->sense_buffer[10] = 0x80; /* Validity bit */ 3064 3065 /* bghm is a "on the wire" FC frame based count */ 3066 switch (scsi_get_prot_op(cmd)) { 3067 case SCSI_PROT_READ_INSERT: 3068 case SCSI_PROT_WRITE_STRIP: 3069 bghm /= cmd->device->sector_size; 3070 break; 3071 case SCSI_PROT_READ_STRIP: 3072 case SCSI_PROT_WRITE_INSERT: 3073 case SCSI_PROT_READ_PASS: 3074 case SCSI_PROT_WRITE_PASS: 3075 bghm /= (cmd->device->sector_size + 3076 sizeof(struct scsi_dif_tuple)); 3077 break; 3078 } 3079 3080 failing_sector = scsi_get_lba(cmd); 3081 failing_sector += bghm; 3082 3083 /* Descriptor Information */ 3084 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]); 3085 } 3086 3087 if (!ret) { 3088 /* No error was reported - problem in FW? */ 3089 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3090 "9057 BLKGRD: Unknown error in cmd " 3091 "0x%x reftag 0x%x blk cnt 0x%x " 3092 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3093 scsi_prot_ref_tag(cmd), 3094 scsi_logical_block_count(cmd), bgstat, bghm); 3095 3096 /* Calculate what type of error it was */ 3097 lpfc_calc_bg_err(phba, lpfc_cmd); 3098 } 3099 out: 3100 return ret; 3101 } 3102 3103 /** 3104 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec 3105 * @phba: The Hba for which this call is being executed. 3106 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3107 * 3108 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd 3109 * field of @lpfc_cmd for device with SLI-4 interface spec. 3110 * 3111 * Return codes: 3112 * 2 - Error - Do not retry 3113 * 1 - Error - Retry 3114 * 0 - Success 3115 **/ 3116 static int 3117 lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) 3118 { 3119 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 3120 struct scatterlist *sgel = NULL; 3121 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 3122 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; 3123 struct sli4_sge *first_data_sgl; 3124 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; 3125 struct lpfc_vport *vport = phba->pport; 3126 union lpfc_wqe128 *wqe = &pwqeq->wqe; 3127 dma_addr_t physaddr; 3128 uint32_t dma_len; 3129 uint32_t dma_offset = 0; 3130 int nseg, i, j; 3131 struct ulp_bde64 *bde; 3132 bool lsp_just_set = false; 3133 struct sli4_hybrid_sgl *sgl_xtra = NULL; 3134 3135 /* 3136 * There are three possibilities here - use scatter-gather segment, use 3137 * the single mapping, or neither. Start the lpfc command prep by 3138 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first 3139 * data bde entry. 
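         *
         * Illustrative layout of the SLI-4 SGL as used below:
         *   sgl[0] -> FCP_CMND, sgl[1] -> FCP_RSP (its last-SGE bit is
         *   cleared or set depending on whether data follows),
         *   sgl[2..] -> data SGEs.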
3140 */ 3141 if (scsi_sg_count(scsi_cmnd)) { 3142 /* 3143 * The driver stores the segment count returned from dma_map_sg 3144 * because this a count of dma-mappings used to map the use_sg 3145 * pages. They are not guaranteed to be the same for those 3146 * architectures that implement an IOMMU. 3147 */ 3148 3149 nseg = scsi_dma_map(scsi_cmnd); 3150 if (unlikely(nseg <= 0)) 3151 return 1; 3152 sgl += 1; 3153 /* clear the last flag in the fcp_rsp map entry */ 3154 sgl->word2 = le32_to_cpu(sgl->word2); 3155 bf_set(lpfc_sli4_sge_last, sgl, 0); 3156 sgl->word2 = cpu_to_le32(sgl->word2); 3157 sgl += 1; 3158 first_data_sgl = sgl; 3159 lpfc_cmd->seg_cnt = nseg; 3160 if (!phba->cfg_xpsgl && 3161 lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 3162 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3163 "9074 BLKGRD:" 3164 " %s: Too many sg segments from " 3165 "dma_map_sg. Config %d, seg_cnt %d\n", 3166 __func__, phba->cfg_sg_seg_cnt, 3167 lpfc_cmd->seg_cnt); 3168 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); 3169 lpfc_cmd->seg_cnt = 0; 3170 scsi_dma_unmap(scsi_cmnd); 3171 return 2; 3172 } 3173 3174 /* 3175 * The driver established a maximum scatter-gather segment count 3176 * during probe that limits the number of sg elements in any 3177 * single scsi command. Just run through the seg_cnt and format 3178 * the sge's. 3179 * When using SLI-3 the driver will try to fit all the BDEs into 3180 * the IOCB. If it can't then the BDEs get added to a BPL as it 3181 * does for SLI-2 mode. 3182 */ 3183 3184 /* for tracking segment boundaries */ 3185 sgel = scsi_sglist(scsi_cmnd); 3186 j = 2; 3187 for (i = 0; i < nseg; i++) { 3188 sgl->word2 = 0; 3189 if (nseg == 1) { 3190 bf_set(lpfc_sli4_sge_last, sgl, 1); 3191 bf_set(lpfc_sli4_sge_type, sgl, 3192 LPFC_SGE_TYPE_DATA); 3193 } else { 3194 bf_set(lpfc_sli4_sge_last, sgl, 0); 3195 3196 /* do we need to expand the segment */ 3197 if (!lsp_just_set && 3198 !((j + 1) % phba->border_sge_num) && 3199 ((nseg - 1) != i)) { 3200 /* set LSP type */ 3201 bf_set(lpfc_sli4_sge_type, sgl, 3202 LPFC_SGE_TYPE_LSP); 3203 3204 sgl_xtra = lpfc_get_sgl_per_hdwq( 3205 phba, lpfc_cmd); 3206 3207 if (unlikely(!sgl_xtra)) { 3208 lpfc_cmd->seg_cnt = 0; 3209 scsi_dma_unmap(scsi_cmnd); 3210 return 1; 3211 } 3212 sgl->addr_lo = cpu_to_le32(putPaddrLow( 3213 sgl_xtra->dma_phys_sgl)); 3214 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 3215 sgl_xtra->dma_phys_sgl)); 3216 3217 } else { 3218 bf_set(lpfc_sli4_sge_type, sgl, 3219 LPFC_SGE_TYPE_DATA); 3220 } 3221 } 3222 3223 if (!(bf_get(lpfc_sli4_sge_type, sgl) & 3224 LPFC_SGE_TYPE_LSP)) { 3225 if ((nseg - 1) == i) 3226 bf_set(lpfc_sli4_sge_last, sgl, 1); 3227 3228 physaddr = sg_dma_address(sgel); 3229 dma_len = sg_dma_len(sgel); 3230 sgl->addr_lo = cpu_to_le32(putPaddrLow( 3231 physaddr)); 3232 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 3233 physaddr)); 3234 3235 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 3236 sgl->word2 = cpu_to_le32(sgl->word2); 3237 sgl->sge_len = cpu_to_le32(dma_len); 3238 3239 dma_offset += dma_len; 3240 sgel = sg_next(sgel); 3241 3242 sgl++; 3243 lsp_just_set = false; 3244 3245 } else { 3246 sgl->word2 = cpu_to_le32(sgl->word2); 3247 sgl->sge_len = cpu_to_le32( 3248 phba->cfg_sg_dma_buf_size); 3249 3250 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 3251 i = i - 1; 3252 3253 lsp_just_set = true; 3254 } 3255 3256 j++; 3257 } 3258 3259 /* PBDE support for first data SGE only. 3260 * For FCoE, we key off Performance Hints. 3261 * For FC, we key off lpfc_enable_pbde. 
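                 *
                 * When this path is taken, the lone data SGE is mirrored
                 * as a BDE into WQE words 13-15 and the PBDE bit is set in
                 * word 11, which lets the adapter skip the separate SGL
                 * fetch for single-segment I/Os (an optimization, not a
                 * functional requirement).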
3262 */ 3263 if (nseg == 1 && 3264 ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) || 3265 phba->cfg_enable_pbde)) { 3266 /* Words 13-15 */ 3267 bde = (struct ulp_bde64 *) 3268 &wqe->words[13]; 3269 bde->addrLow = first_data_sgl->addr_lo; 3270 bde->addrHigh = first_data_sgl->addr_hi; 3271 bde->tus.f.bdeSize = 3272 le32_to_cpu(first_data_sgl->sge_len); 3273 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 3274 bde->tus.w = cpu_to_le32(bde->tus.w); 3275 3276 /* Word 11 - set PBDE bit */ 3277 bf_set(wqe_pbde, &wqe->generic.wqe_com, 1); 3278 } else { 3279 memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3)); 3280 /* Word 11 - PBDE bit disabled by default template */ 3281 } 3282 } else { 3283 sgl += 1; 3284 /* set the last flag in the fcp_rsp map entry */ 3285 sgl->word2 = le32_to_cpu(sgl->word2); 3286 bf_set(lpfc_sli4_sge_last, sgl, 1); 3287 sgl->word2 = cpu_to_le32(sgl->word2); 3288 3289 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) || 3290 phba->cfg_enable_pbde) { 3291 bde = (struct ulp_bde64 *) 3292 &wqe->words[13]; 3293 memset(bde, 0, (sizeof(uint32_t) * 3)); 3294 } 3295 } 3296 3297 /* 3298 * Finish initializing those IOCB fields that are dependent on the 3299 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is 3300 * explicitly reinitialized. 3301 * all iocb memory resources are reused. 3302 */ 3303 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); 3304 /* Set first-burst provided it was successfully negotiated */ 3305 if (!(phba->hba_flag & HBA_FCOE_MODE) && 3306 vport->cfg_first_burst_size && 3307 scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) { 3308 u32 init_len, total_len; 3309 3310 total_len = be32_to_cpu(fcp_cmnd->fcpDl); 3311 init_len = min(total_len, vport->cfg_first_burst_size); 3312 3313 /* Word 4 & 5 */ 3314 wqe->fcp_iwrite.initial_xfer_len = init_len; 3315 wqe->fcp_iwrite.total_xfer_len = total_len; 3316 } else { 3317 /* Word 4 */ 3318 wqe->fcp_iwrite.total_xfer_len = 3319 be32_to_cpu(fcp_cmnd->fcpDl); 3320 } 3321 3322 /* 3323 * If the OAS driver feature is enabled and the lun is enabled for 3324 * OAS, set the oas iocb related flags. 3325 */ 3326 if ((phba->cfg_fof) && ((struct lpfc_device_data *) 3327 scsi_cmnd->device->hostdata)->oas_enabled) { 3328 lpfc_cmd->cur_iocbq.cmd_flag |= (LPFC_IO_OAS | LPFC_IO_FOF); 3329 lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *) 3330 scsi_cmnd->device->hostdata)->priority; 3331 3332 /* Word 10 */ 3333 bf_set(wqe_oas, &wqe->generic.wqe_com, 1); 3334 bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1); 3335 3336 if (lpfc_cmd->cur_iocbq.priority) 3337 bf_set(wqe_ccp, &wqe->generic.wqe_com, 3338 (lpfc_cmd->cur_iocbq.priority << 1)); 3339 else 3340 bf_set(wqe_ccp, &wqe->generic.wqe_com, 3341 (phba->cfg_XLanePriority << 1)); 3342 } 3343 3344 return 0; 3345 } 3346 3347 /** 3348 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec 3349 * @phba: The Hba for which this call is being executed. 3350 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3351 * 3352 * This is the protection/DIF aware version of 3353 * lpfc_scsi_prep_dma_buf(). 
It may be a good idea to combine the 3354 * two functions eventually, but for now, it's here 3355 * Return codes: 3356 * 2 - Error - Do not retry 3357 * 1 - Error - Retry 3358 * 0 - Success 3359 **/ 3360 static int 3361 lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, 3362 struct lpfc_io_buf *lpfc_cmd) 3363 { 3364 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 3365 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 3366 struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl); 3367 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; 3368 union lpfc_wqe128 *wqe = &pwqeq->wqe; 3369 uint32_t num_sge = 0; 3370 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; 3371 int prot_group_type = 0; 3372 int fcpdl; 3373 int ret = 1; 3374 struct lpfc_vport *vport = phba->pport; 3375 3376 /* 3377 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd 3378 * fcp_rsp regions to the first data sge entry 3379 */ 3380 if (scsi_sg_count(scsi_cmnd)) { 3381 /* 3382 * The driver stores the segment count returned from dma_map_sg 3383 * because this a count of dma-mappings used to map the use_sg 3384 * pages. They are not guaranteed to be the same for those 3385 * architectures that implement an IOMMU. 3386 */ 3387 datasegcnt = dma_map_sg(&phba->pcidev->dev, 3388 scsi_sglist(scsi_cmnd), 3389 scsi_sg_count(scsi_cmnd), datadir); 3390 if (unlikely(!datasegcnt)) 3391 return 1; 3392 3393 sgl += 1; 3394 /* clear the last flag in the fcp_rsp map entry */ 3395 sgl->word2 = le32_to_cpu(sgl->word2); 3396 bf_set(lpfc_sli4_sge_last, sgl, 0); 3397 sgl->word2 = cpu_to_le32(sgl->word2); 3398 3399 sgl += 1; 3400 lpfc_cmd->seg_cnt = datasegcnt; 3401 3402 /* First check if data segment count from SCSI Layer is good */ 3403 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt && 3404 !phba->cfg_xpsgl) { 3405 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); 3406 ret = 2; 3407 goto err; 3408 } 3409 3410 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); 3411 3412 switch (prot_group_type) { 3413 case LPFC_PG_TYPE_NO_DIF: 3414 /* Here we need to add a DISEED to the count */ 3415 if (((lpfc_cmd->seg_cnt + 1) > 3416 phba->cfg_total_seg_cnt) && 3417 !phba->cfg_xpsgl) { 3418 ret = 2; 3419 goto err; 3420 } 3421 3422 num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl, 3423 datasegcnt, lpfc_cmd); 3424 3425 /* we should have 2 or more entries in buffer list */ 3426 if (num_sge < 2) { 3427 ret = 2; 3428 goto err; 3429 } 3430 break; 3431 3432 case LPFC_PG_TYPE_DIF_BUF: 3433 /* 3434 * This type indicates that protection buffers are 3435 * passed to the driver, so that needs to be prepared 3436 * for DMA 3437 */ 3438 protsegcnt = dma_map_sg(&phba->pcidev->dev, 3439 scsi_prot_sglist(scsi_cmnd), 3440 scsi_prot_sg_count(scsi_cmnd), datadir); 3441 if (unlikely(!protsegcnt)) { 3442 scsi_dma_unmap(scsi_cmnd); 3443 return 1; 3444 } 3445 3446 lpfc_cmd->prot_seg_cnt = protsegcnt; 3447 /* 3448 * There is a minimun of 3 SGEs used for every 3449 * protection data segment. 
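                         *
                         * For example (hypothetical configuration): with
                         * cfg_total_seg_cnt = 64 and no extended SGL support
                         * (cfg_xpsgl == 0), at most (64 - 2) / 3 = 20
                         * protection segments can be accepted here, since
                         * each one may need a DISEED, a DIF and a data SGE.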
3450 */ 3451 if (((lpfc_cmd->prot_seg_cnt * 3) > 3452 (phba->cfg_total_seg_cnt - 2)) && 3453 !phba->cfg_xpsgl) { 3454 ret = 2; 3455 goto err; 3456 } 3457 3458 num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl, 3459 datasegcnt, protsegcnt, lpfc_cmd); 3460 3461 /* we should have 3 or more entries in buffer list */ 3462 if (num_sge < 3 || 3463 (num_sge > phba->cfg_total_seg_cnt && 3464 !phba->cfg_xpsgl)) { 3465 ret = 2; 3466 goto err; 3467 } 3468 break; 3469 3470 case LPFC_PG_TYPE_INVALID: 3471 default: 3472 scsi_dma_unmap(scsi_cmnd); 3473 lpfc_cmd->seg_cnt = 0; 3474 3475 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3476 "9083 Unexpected protection group %i\n", 3477 prot_group_type); 3478 return 2; 3479 } 3480 } 3481 3482 switch (scsi_get_prot_op(scsi_cmnd)) { 3483 case SCSI_PROT_WRITE_STRIP: 3484 case SCSI_PROT_READ_STRIP: 3485 lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_STRIP; 3486 break; 3487 case SCSI_PROT_WRITE_INSERT: 3488 case SCSI_PROT_READ_INSERT: 3489 lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_INSERT; 3490 break; 3491 case SCSI_PROT_WRITE_PASS: 3492 case SCSI_PROT_READ_PASS: 3493 lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_PASS; 3494 break; 3495 } 3496 3497 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); 3498 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); 3499 3500 /* Set first-burst provided it was successfully negotiated */ 3501 if (!(phba->hba_flag & HBA_FCOE_MODE) && 3502 vport->cfg_first_burst_size && 3503 scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) { 3504 u32 init_len, total_len; 3505 3506 total_len = be32_to_cpu(fcp_cmnd->fcpDl); 3507 init_len = min(total_len, vport->cfg_first_burst_size); 3508 3509 /* Word 4 & 5 */ 3510 wqe->fcp_iwrite.initial_xfer_len = init_len; 3511 wqe->fcp_iwrite.total_xfer_len = total_len; 3512 } else { 3513 /* Word 4 */ 3514 wqe->fcp_iwrite.total_xfer_len = 3515 be32_to_cpu(fcp_cmnd->fcpDl); 3516 } 3517 3518 /* 3519 * If the OAS driver feature is enabled and the lun is enabled for 3520 * OAS, set the oas iocb related flags. 3521 */ 3522 if ((phba->cfg_fof) && ((struct lpfc_device_data *) 3523 scsi_cmnd->device->hostdata)->oas_enabled) { 3524 lpfc_cmd->cur_iocbq.cmd_flag |= (LPFC_IO_OAS | LPFC_IO_FOF); 3525 3526 /* Word 10 */ 3527 bf_set(wqe_oas, &wqe->generic.wqe_com, 1); 3528 bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1); 3529 bf_set(wqe_ccp, &wqe->generic.wqe_com, 3530 (phba->cfg_XLanePriority << 1)); 3531 } 3532 3533 /* Word 7. 
DIF Flags */ 3534 if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_PASS) 3535 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU); 3536 else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_STRIP) 3537 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP); 3538 else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_INSERT) 3539 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT); 3540 3541 lpfc_cmd->cur_iocbq.cmd_flag &= ~(LPFC_IO_DIF_PASS | 3542 LPFC_IO_DIF_STRIP | LPFC_IO_DIF_INSERT); 3543 3544 return 0; 3545 err: 3546 if (lpfc_cmd->seg_cnt) 3547 scsi_dma_unmap(scsi_cmnd); 3548 if (lpfc_cmd->prot_seg_cnt) 3549 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), 3550 scsi_prot_sg_count(scsi_cmnd), 3551 scsi_cmnd->sc_data_direction); 3552 3553 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3554 "9084 Cannot setup S/G List for HBA" 3555 "IO segs %d/%d SGL %d SCSI %d: %d %d\n", 3556 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, 3557 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, 3558 prot_group_type, num_sge); 3559 3560 lpfc_cmd->seg_cnt = 0; 3561 lpfc_cmd->prot_seg_cnt = 0; 3562 return ret; 3563 } 3564 3565 /** 3566 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer 3567 * @phba: The Hba for which this call is being executed. 3568 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3569 * 3570 * This routine wraps the actual DMA mapping function pointer from the 3571 * lpfc_hba struct. 3572 * 3573 * Return codes: 3574 * 1 - Error 3575 * 0 - Success 3576 **/ 3577 static inline int 3578 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) 3579 { 3580 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); 3581 } 3582 3583 /** 3584 * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer 3585 * using BlockGuard. 3586 * @phba: The Hba for which this call is being executed. 3587 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3588 * 3589 * This routine wraps the actual DMA mapping function pointer from the 3590 * lpfc_hba struct. 3591 * 3592 * Return codes: 3593 * 1 - Error 3594 * 0 - Success 3595 **/ 3596 static inline int 3597 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) 3598 { 3599 return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 3600 } 3601 3602 /** 3603 * lpfc_scsi_prep_cmnd_buf - Wrapper function for IOCB/WQE mapping of scsi 3604 * buffer 3605 * @vport: Pointer to vport object. 3606 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3607 * @tmo: Timeout value for IO 3608 * 3609 * This routine initializes IOCB/WQE data structure from scsi command 3610 * 3611 * Return codes: 3612 * 1 - Error 3613 * 0 - Success 3614 **/ 3615 static inline int 3616 lpfc_scsi_prep_cmnd_buf(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, 3617 uint8_t tmo) 3618 { 3619 return vport->phba->lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, tmo); 3620 } 3621 3622 /** 3623 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error 3624 * @phba: Pointer to hba context object. 3625 * @vport: Pointer to vport object. 3626 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error. 3627 * @fcpi_parm: FCP Initiator parameter. 3628 * 3629 * This function posts an event when there is a SCSI command reporting 3630 * error from the scsi device. 
3631 **/ 3632 static void 3633 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport, 3634 struct lpfc_io_buf *lpfc_cmd, uint32_t fcpi_parm) { 3635 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; 3636 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; 3637 uint32_t resp_info = fcprsp->rspStatus2; 3638 uint32_t scsi_status = fcprsp->rspStatus3; 3639 struct lpfc_fast_path_event *fast_path_evt = NULL; 3640 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode; 3641 unsigned long flags; 3642 3643 if (!pnode) 3644 return; 3645 3646 /* If there is queuefull or busy condition send a scsi event */ 3647 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) || 3648 (cmnd->result == SAM_STAT_BUSY)) { 3649 fast_path_evt = lpfc_alloc_fast_evt(phba); 3650 if (!fast_path_evt) 3651 return; 3652 fast_path_evt->un.scsi_evt.event_type = 3653 FC_REG_SCSI_EVENT; 3654 fast_path_evt->un.scsi_evt.subcategory = 3655 (cmnd->result == SAM_STAT_TASK_SET_FULL) ? 3656 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY; 3657 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun; 3658 memcpy(&fast_path_evt->un.scsi_evt.wwpn, 3659 &pnode->nlp_portname, sizeof(struct lpfc_name)); 3660 memcpy(&fast_path_evt->un.scsi_evt.wwnn, 3661 &pnode->nlp_nodename, sizeof(struct lpfc_name)); 3662 } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen && 3663 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) { 3664 fast_path_evt = lpfc_alloc_fast_evt(phba); 3665 if (!fast_path_evt) 3666 return; 3667 fast_path_evt->un.check_cond_evt.scsi_event.event_type = 3668 FC_REG_SCSI_EVENT; 3669 fast_path_evt->un.check_cond_evt.scsi_event.subcategory = 3670 LPFC_EVENT_CHECK_COND; 3671 fast_path_evt->un.check_cond_evt.scsi_event.lun = 3672 cmnd->device->lun; 3673 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn, 3674 &pnode->nlp_portname, sizeof(struct lpfc_name)); 3675 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn, 3676 &pnode->nlp_nodename, sizeof(struct lpfc_name)); 3677 fast_path_evt->un.check_cond_evt.sense_key = 3678 cmnd->sense_buffer[2] & 0xf; 3679 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12]; 3680 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13]; 3681 } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) && 3682 fcpi_parm && 3683 ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) || 3684 ((scsi_status == SAM_STAT_GOOD) && 3685 !(resp_info & (RESID_UNDER | RESID_OVER))))) { 3686 /* 3687 * If status is good or resid does not match with fcp_param and 3688 * there is valid fcpi_parm, then there is a read_check error 3689 */ 3690 fast_path_evt = lpfc_alloc_fast_evt(phba); 3691 if (!fast_path_evt) 3692 return; 3693 fast_path_evt->un.read_check_error.header.event_type = 3694 FC_REG_FABRIC_EVENT; 3695 fast_path_evt->un.read_check_error.header.subcategory = 3696 LPFC_EVENT_FCPRDCHKERR; 3697 memcpy(&fast_path_evt->un.read_check_error.header.wwpn, 3698 &pnode->nlp_portname, sizeof(struct lpfc_name)); 3699 memcpy(&fast_path_evt->un.read_check_error.header.wwnn, 3700 &pnode->nlp_nodename, sizeof(struct lpfc_name)); 3701 fast_path_evt->un.read_check_error.lun = cmnd->device->lun; 3702 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0]; 3703 fast_path_evt->un.read_check_error.fcpiparam = 3704 fcpi_parm; 3705 } else 3706 return; 3707 3708 fast_path_evt->vport = vport; 3709 spin_lock_irqsave(&phba->hbalock, flags); 3710 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list); 3711 spin_unlock_irqrestore(&phba->hbalock, flags); 3712 lpfc_worker_wake_up(phba); 3713 return; 3714 } 3715 3716 /** 3717 
* lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev 3718 * @phba: The HBA for which this call is being executed. 3719 * @psb: The scsi buffer which is going to be un-mapped. 3720 * 3721 * This routine does DMA un-mapping of scatter gather list of scsi command 3722 * field of @lpfc_cmd for device with SLI-3 interface spec. 3723 **/ 3724 static void 3725 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb) 3726 { 3727 /* 3728 * There are only two special cases to consider. (1) the scsi command 3729 * requested scatter-gather usage or (2) the scsi command allocated 3730 * a request buffer, but did not request use_sg. There is a third 3731 * case, but it does not require resource deallocation. 3732 */ 3733 if (psb->seg_cnt > 0) 3734 scsi_dma_unmap(psb->pCmd); 3735 if (psb->prot_seg_cnt > 0) 3736 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd), 3737 scsi_prot_sg_count(psb->pCmd), 3738 psb->pCmd->sc_data_direction); 3739 } 3740 3741 /** 3742 * lpfc_unblock_requests - allow further commands to be queued. 3743 * @phba: pointer to phba object 3744 * 3745 * For single vport, just call scsi_unblock_requests on physical port. 3746 * For multiple vports, send scsi_unblock_requests for all the vports. 3747 */ 3748 void 3749 lpfc_unblock_requests(struct lpfc_hba *phba) 3750 { 3751 struct lpfc_vport **vports; 3752 struct Scsi_Host *shost; 3753 int i; 3754 3755 if (phba->sli_rev == LPFC_SLI_REV4 && 3756 !phba->sli4_hba.max_cfg_param.vpi_used) { 3757 shost = lpfc_shost_from_vport(phba->pport); 3758 scsi_unblock_requests(shost); 3759 return; 3760 } 3761 3762 vports = lpfc_create_vport_work_array(phba); 3763 if (vports != NULL) 3764 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3765 shost = lpfc_shost_from_vport(vports[i]); 3766 scsi_unblock_requests(shost); 3767 } 3768 lpfc_destroy_vport_work_array(phba, vports); 3769 } 3770 3771 /** 3772 * lpfc_block_requests - prevent further commands from being queued. 3773 * @phba: pointer to phba object 3774 * 3775 * For single vport, just call scsi_block_requests on physical port. 3776 * For multiple vports, send scsi_block_requests for all the vports. 3777 */ 3778 void 3779 lpfc_block_requests(struct lpfc_hba *phba) 3780 { 3781 struct lpfc_vport **vports; 3782 struct Scsi_Host *shost; 3783 int i; 3784 3785 if (atomic_read(&phba->cmf_stop_io)) 3786 return; 3787 3788 if (phba->sli_rev == LPFC_SLI_REV4 && 3789 !phba->sli4_hba.max_cfg_param.vpi_used) { 3790 shost = lpfc_shost_from_vport(phba->pport); 3791 scsi_block_requests(shost); 3792 return; 3793 } 3794 3795 vports = lpfc_create_vport_work_array(phba); 3796 if (vports != NULL) 3797 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3798 shost = lpfc_shost_from_vport(vports[i]); 3799 scsi_block_requests(shost); 3800 } 3801 lpfc_destroy_vport_work_array(phba, vports); 3802 } 3803 3804 /** 3805 * lpfc_update_cmf_cmpl - Adjust CMF counters for IO completion 3806 * @phba: The HBA for which this call is being executed. 3807 * @time: The latency of the IO that completed (in ns) 3808 * @size: The size of the IO that completed 3809 * @shost: SCSI host the IO completed on (NULL for a NVME IO) 3810 * 3811 * The routine adjusts the various Burst and Bandwidth counters used in 3812 * Congestion management and E2E. If time is set to LPFC_CGN_NOT_SENT, 3813 * that means the IO was never issued to the HBA, so this routine is 3814 * just being called to cleanup the counter from a previous 3815 * lpfc_update_cmf_cmd call. 
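 *
 * A rough usage sketch with illustrative values (not taken from a real
 * trace): a completed 4096 byte read whose measured latency was 12345 ns
 * would be accounted with
 *
 *   lpfc_update_cmf_cmpl(phba, 12345, 4096, shost);
 *
 * which rounds the latency to 12 us and folds the size, latency and IO
 * count into the per-CPU congestion statistics.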
3816 */ 3817 int 3818 lpfc_update_cmf_cmpl(struct lpfc_hba *phba, 3819 uint64_t time, uint32_t size, struct Scsi_Host *shost) 3820 { 3821 struct lpfc_cgn_stat *cgs; 3822 3823 if (time != LPFC_CGN_NOT_SENT) { 3824 /* lat is ns coming in, save latency in us */ 3825 if (time < 1000) 3826 time = 1; 3827 else 3828 time = div_u64(time + 500, 1000); /* round it */ 3829 3830 cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id()); 3831 atomic64_add(size, &cgs->rcv_bytes); 3832 atomic64_add(time, &cgs->rx_latency); 3833 atomic_inc(&cgs->rx_io_cnt); 3834 } 3835 return 0; 3836 } 3837 3838 /** 3839 * lpfc_update_cmf_cmd - Adjust CMF counters for IO submission 3840 * @phba: The HBA for which this call is being executed. 3841 * @size: The size of the IO that will be issued 3842 * 3843 * The routine adjusts the various Burst and Bandwidth counters used in 3844 * Congestion management and E2E. 3845 */ 3846 int 3847 lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t size) 3848 { 3849 uint64_t total; 3850 struct lpfc_cgn_stat *cgs; 3851 int cpu; 3852 3853 /* At this point we are either LPFC_CFG_MANAGED or LPFC_CFG_MONITOR */ 3854 if (phba->cmf_active_mode == LPFC_CFG_MANAGED && 3855 phba->cmf_max_bytes_per_interval) { 3856 total = 0; 3857 for_each_present_cpu(cpu) { 3858 cgs = per_cpu_ptr(phba->cmf_stat, cpu); 3859 total += atomic64_read(&cgs->total_bytes); 3860 } 3861 if (total >= phba->cmf_max_bytes_per_interval) { 3862 if (!atomic_xchg(&phba->cmf_bw_wait, 1)) { 3863 lpfc_block_requests(phba); 3864 phba->cmf_last_ts = 3865 lpfc_calc_cmf_latency(phba); 3866 } 3867 atomic_inc(&phba->cmf_busy); 3868 return -EBUSY; 3869 } 3870 if (size > atomic_read(&phba->rx_max_read_cnt)) 3871 atomic_set(&phba->rx_max_read_cnt, size); 3872 } 3873 3874 cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id()); 3875 atomic64_add(size, &cgs->total_bytes); 3876 return 0; 3877 } 3878 3879 /** 3880 * lpfc_handle_fcp_err - FCP response handler 3881 * @vport: The virtual port for which this call is being executed. 3882 * @lpfc_cmd: Pointer to lpfc_io_buf data structure. 3883 * @fcpi_parm: FCP Initiator parameter. 3884 * 3885 * This routine is called to process response IOCB with status field 3886 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command 3887 * based upon SCSI and FCP error. 3888 **/ 3889 static void 3890 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, 3891 uint32_t fcpi_parm) 3892 { 3893 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; 3894 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd; 3895 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; 3896 uint32_t resp_info = fcprsp->rspStatus2; 3897 uint32_t scsi_status = fcprsp->rspStatus3; 3898 uint32_t *lp; 3899 uint32_t host_status = DID_OK; 3900 uint32_t rsplen = 0; 3901 uint32_t fcpDl; 3902 uint32_t logit = LOG_FCP | LOG_FCP_ERROR; 3903 3904 3905 /* 3906 * If this is a task management command, there is no 3907 * scsi packet associated with this lpfc_cmd. The driver 3908 * consumes it. 
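 * fcpCntl2 carries the FCP task management flags, so a non-zero value
 * here means the request was a task management command rather than a
 * normal read or write.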
3909 */ 3910 if (fcpcmd->fcpCntl2) { 3911 scsi_status = 0; 3912 goto out; 3913 } 3914 3915 if (resp_info & RSP_LEN_VALID) { 3916 rsplen = be32_to_cpu(fcprsp->rspRspLen); 3917 if (rsplen != 0 && rsplen != 4 && rsplen != 8) { 3918 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3919 "2719 Invalid response length: " 3920 "tgt x%x lun x%llx cmnd x%x rsplen " 3921 "x%x\n", cmnd->device->id, 3922 cmnd->device->lun, cmnd->cmnd[0], 3923 rsplen); 3924 host_status = DID_ERROR; 3925 goto out; 3926 } 3927 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) { 3928 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3929 "2757 Protocol failure detected during " 3930 "processing of FCP I/O op: " 3931 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n", 3932 cmnd->device->id, 3933 cmnd->device->lun, cmnd->cmnd[0], 3934 fcprsp->rspInfo3); 3935 host_status = DID_ERROR; 3936 goto out; 3937 } 3938 } 3939 3940 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) { 3941 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen); 3942 if (snslen > SCSI_SENSE_BUFFERSIZE) 3943 snslen = SCSI_SENSE_BUFFERSIZE; 3944 3945 if (resp_info & RSP_LEN_VALID) 3946 rsplen = be32_to_cpu(fcprsp->rspRspLen); 3947 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen); 3948 } 3949 lp = (uint32_t *)cmnd->sense_buffer; 3950 3951 /* special handling for under run conditions */ 3952 if (!scsi_status && (resp_info & RESID_UNDER)) { 3953 /* don't log under runs if fcp set... */ 3954 if (vport->cfg_log_verbose & LOG_FCP) 3955 logit = LOG_FCP_ERROR; 3956 /* unless operator says so */ 3957 if (vport->cfg_log_verbose & LOG_FCP_UNDER) 3958 logit = LOG_FCP_UNDER; 3959 } 3960 3961 lpfc_printf_vlog(vport, KERN_WARNING, logit, 3962 "9024 FCP command x%x failed: x%x SNS x%x x%x " 3963 "Data: x%x x%x x%x x%x x%x\n", 3964 cmnd->cmnd[0], scsi_status, 3965 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info, 3966 be32_to_cpu(fcprsp->rspResId), 3967 be32_to_cpu(fcprsp->rspSnsLen), 3968 be32_to_cpu(fcprsp->rspRspLen), 3969 fcprsp->rspInfo3); 3970 3971 scsi_set_resid(cmnd, 0); 3972 fcpDl = be32_to_cpu(fcpcmd->fcpDl); 3973 if (resp_info & RESID_UNDER) { 3974 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId)); 3975 3976 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER, 3977 "9025 FCP Underrun, expected %d, " 3978 "residual %d Data: x%x x%x x%x\n", 3979 fcpDl, 3980 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0], 3981 cmnd->underflow); 3982 3983 /* 3984 * If there is an under run, check if under run reported by 3985 * storage array is same as the under run reported by HBA. 3986 * If this is not same, there is a dropped frame. 3987 */ 3988 if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) { 3989 lpfc_printf_vlog(vport, KERN_WARNING, 3990 LOG_FCP | LOG_FCP_ERROR, 3991 "9026 FCP Read Check Error " 3992 "and Underrun Data: x%x x%x x%x x%x\n", 3993 fcpDl, 3994 scsi_get_resid(cmnd), fcpi_parm, 3995 cmnd->cmnd[0]); 3996 scsi_set_resid(cmnd, scsi_bufflen(cmnd)); 3997 host_status = DID_ERROR; 3998 } 3999 /* 4000 * The cmnd->underflow is the minimum number of bytes that must 4001 * be transferred for this command. Provided a sense condition 4002 * is not present, make sure the actual amount transferred is at 4003 * least the underflow value or fail. 
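 * For example (illustrative numbers only): with scsi_bufflen() of 8192,
 * a reported residual of 4096, no sense data and cmnd->underflow of 8192,
 * only 4096 bytes actually arrived, so the command is failed with
 * DID_ERROR below.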
4004 */ 4005 if (!(resp_info & SNS_LEN_VALID) && 4006 (scsi_status == SAM_STAT_GOOD) && 4007 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) 4008 < cmnd->underflow)) { 4009 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 4010 "9027 FCP command x%x residual " 4011 "underrun converted to error " 4012 "Data: x%x x%x x%x\n", 4013 cmnd->cmnd[0], scsi_bufflen(cmnd), 4014 scsi_get_resid(cmnd), cmnd->underflow); 4015 host_status = DID_ERROR; 4016 } 4017 } else if (resp_info & RESID_OVER) { 4018 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 4019 "9028 FCP command x%x residual overrun error. " 4020 "Data: x%x x%x\n", cmnd->cmnd[0], 4021 scsi_bufflen(cmnd), scsi_get_resid(cmnd)); 4022 host_status = DID_ERROR; 4023 4024 /* 4025 * Check SLI validation that all the transfer was actually done 4026 * (fcpi_parm should be zero). Apply check only to reads. 4027 */ 4028 } else if (fcpi_parm) { 4029 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, 4030 "9029 FCP %s Check Error Data: " 4031 "x%x x%x x%x x%x x%x\n", 4032 ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ? 4033 "Read" : "Write"), 4034 fcpDl, be32_to_cpu(fcprsp->rspResId), 4035 fcpi_parm, cmnd->cmnd[0], scsi_status); 4036 4037 /* There is some issue with the LPe12000 that causes it 4038 * to miscalculate the fcpi_parm and falsely trip this 4039 * recovery logic. Detect this case and don't error when true. 4040 */ 4041 if (fcpi_parm > fcpDl) 4042 goto out; 4043 4044 switch (scsi_status) { 4045 case SAM_STAT_GOOD: 4046 case SAM_STAT_CHECK_CONDITION: 4047 /* Fabric dropped a data frame. Fail any successful 4048 * command in which we detected dropped frames. 4049 * A status of good or some check conditions could 4050 * be considered a successful command. 4051 */ 4052 host_status = DID_ERROR; 4053 break; 4054 } 4055 scsi_set_resid(cmnd, scsi_bufflen(cmnd)); 4056 } 4057 4058 out: 4059 cmnd->result = host_status << 16 | scsi_status; 4060 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, fcpi_parm); 4061 } 4062 4063 /** 4064 * lpfc_fcp_io_cmd_wqe_cmpl - Complete a FCP IO 4065 * @phba: The hba for which this call is being executed. 4066 * @pwqeIn: The command WQE for the scsi cmnd. 4067 * @pwqeOut: Pointer to driver response WQE object. 4068 * 4069 * This routine assigns scsi command result by looking into response WQE 4070 * status field appropriately. This routine handles QUEUE FULL condition as 4071 * well by ramping down device queue depth. 4072 **/ 4073 static void 4074 lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, 4075 struct lpfc_iocbq *pwqeOut) 4076 { 4077 struct lpfc_io_buf *lpfc_cmd = pwqeIn->io_buf; 4078 struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl; 4079 struct lpfc_vport *vport = pwqeIn->vport; 4080 struct lpfc_rport_data *rdata; 4081 struct lpfc_nodelist *ndlp; 4082 struct scsi_cmnd *cmd; 4083 unsigned long flags; 4084 struct lpfc_fast_path_event *fast_path_evt; 4085 struct Scsi_Host *shost; 4086 u32 logit = LOG_FCP; 4087 u32 status, idx; 4088 u32 lat; 4089 u8 wait_xb_clr = 0; 4090 4091 /* Sanity check on return of outstanding command */ 4092 if (!lpfc_cmd) { 4093 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4094 "9032 Null lpfc_cmd pointer. 
No " 4095 "release, skip completion\n"); 4096 return; 4097 } 4098 4099 rdata = lpfc_cmd->rdata; 4100 ndlp = rdata->pnode; 4101 4102 /* Sanity check on return of outstanding command */ 4103 cmd = lpfc_cmd->pCmd; 4104 if (!cmd) { 4105 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4106 "9042 I/O completion: Not an active IO\n"); 4107 lpfc_release_scsi_buf(phba, lpfc_cmd); 4108 return; 4109 } 4110 /* Guard against abort handler being called at same time */ 4111 spin_lock(&lpfc_cmd->buf_lock); 4112 idx = lpfc_cmd->cur_iocbq.hba_wqidx; 4113 if (phba->sli4_hba.hdwq) 4114 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++; 4115 4116 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4117 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) 4118 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); 4119 #endif 4120 shost = cmd->device->host; 4121 4122 status = bf_get(lpfc_wcqe_c_status, wcqe); 4123 lpfc_cmd->status = (status & LPFC_IOCB_STATUS_MASK); 4124 lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK); 4125 4126 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; 4127 if (bf_get(lpfc_wcqe_c_xb, wcqe)) { 4128 lpfc_cmd->flags |= LPFC_SBUF_XBUSY; 4129 if (phba->cfg_fcp_wait_abts_rsp) 4130 wait_xb_clr = 1; 4131 } 4132 4133 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4134 if (lpfc_cmd->prot_data_type) { 4135 struct scsi_dif_tuple *src = NULL; 4136 4137 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment; 4138 /* 4139 * Used to restore any changes to protection 4140 * data for error injection. 4141 */ 4142 switch (lpfc_cmd->prot_data_type) { 4143 case LPFC_INJERR_REFTAG: 4144 src->ref_tag = 4145 lpfc_cmd->prot_data; 4146 break; 4147 case LPFC_INJERR_APPTAG: 4148 src->app_tag = 4149 (uint16_t)lpfc_cmd->prot_data; 4150 break; 4151 case LPFC_INJERR_GUARD: 4152 src->guard_tag = 4153 (uint16_t)lpfc_cmd->prot_data; 4154 break; 4155 default: 4156 break; 4157 } 4158 4159 lpfc_cmd->prot_data = 0; 4160 lpfc_cmd->prot_data_type = 0; 4161 lpfc_cmd->prot_data_segment = NULL; 4162 } 4163 #endif 4164 if (unlikely(lpfc_cmd->status)) { 4165 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && 4166 (lpfc_cmd->result & IOERR_DRVR_MASK)) 4167 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 4168 else if (lpfc_cmd->status >= IOSTAT_CNT) 4169 lpfc_cmd->status = IOSTAT_DEFAULT; 4170 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR && 4171 !lpfc_cmd->fcp_rsp->rspStatus3 && 4172 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) && 4173 !(vport->cfg_log_verbose & LOG_FCP_UNDER)) 4174 logit = 0; 4175 else 4176 logit = LOG_FCP | LOG_FCP_UNDER; 4177 lpfc_printf_vlog(vport, KERN_WARNING, logit, 4178 "9034 FCP cmd x%x failed <%d/%lld> " 4179 "status: x%x result: x%x " 4180 "sid: x%x did: x%x oxid: x%x " 4181 "Data: x%x x%x x%x\n", 4182 cmd->cmnd[0], 4183 cmd->device ? cmd->device->id : 0xffff, 4184 cmd->device ? cmd->device->lun : 0xffff, 4185 lpfc_cmd->status, lpfc_cmd->result, 4186 vport->fc_myDID, 4187 (ndlp) ? 
ndlp->nlp_DID : 0, 4188 lpfc_cmd->cur_iocbq.sli4_xritag, 4189 wcqe->parameter, wcqe->total_data_placed, 4190 lpfc_cmd->cur_iocbq.iotag); 4191 } 4192 4193 switch (lpfc_cmd->status) { 4194 case IOSTAT_SUCCESS: 4195 cmd->result = DID_OK << 16; 4196 break; 4197 case IOSTAT_FCP_RSP_ERROR: 4198 lpfc_handle_fcp_err(vport, lpfc_cmd, 4199 pwqeIn->wqe.fcp_iread.total_xfer_len - 4200 wcqe->total_data_placed); 4201 break; 4202 case IOSTAT_NPORT_BSY: 4203 case IOSTAT_FABRIC_BSY: 4204 cmd->result = DID_TRANSPORT_DISRUPTED << 16; 4205 fast_path_evt = lpfc_alloc_fast_evt(phba); 4206 if (!fast_path_evt) 4207 break; 4208 fast_path_evt->un.fabric_evt.event_type = 4209 FC_REG_FABRIC_EVENT; 4210 fast_path_evt->un.fabric_evt.subcategory = 4211 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ? 4212 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY; 4213 if (ndlp) { 4214 memcpy(&fast_path_evt->un.fabric_evt.wwpn, 4215 &ndlp->nlp_portname, 4216 sizeof(struct lpfc_name)); 4217 memcpy(&fast_path_evt->un.fabric_evt.wwnn, 4218 &ndlp->nlp_nodename, 4219 sizeof(struct lpfc_name)); 4220 } 4221 fast_path_evt->vport = vport; 4222 fast_path_evt->work_evt.evt = 4223 LPFC_EVT_FASTPATH_MGMT_EVT; 4224 spin_lock_irqsave(&phba->hbalock, flags); 4225 list_add_tail(&fast_path_evt->work_evt.evt_listp, 4226 &phba->work_list); 4227 spin_unlock_irqrestore(&phba->hbalock, flags); 4228 lpfc_worker_wake_up(phba); 4229 lpfc_printf_vlog(vport, KERN_WARNING, logit, 4230 "9035 Fabric/Node busy FCP cmd x%x failed" 4231 " <%d/%lld> " 4232 "status: x%x result: x%x " 4233 "sid: x%x did: x%x oxid: x%x " 4234 "Data: x%x x%x x%x\n", 4235 cmd->cmnd[0], 4236 cmd->device ? cmd->device->id : 0xffff, 4237 cmd->device ? cmd->device->lun : 0xffff, 4238 lpfc_cmd->status, lpfc_cmd->result, 4239 vport->fc_myDID, 4240 (ndlp) ? ndlp->nlp_DID : 0, 4241 lpfc_cmd->cur_iocbq.sli4_xritag, 4242 wcqe->parameter, 4243 wcqe->total_data_placed, 4244 lpfc_cmd->cur_iocbq.iocb.ulpIoTag); 4245 break; 4246 case IOSTAT_REMOTE_STOP: 4247 if (ndlp) { 4248 /* This I/O was aborted by the target, we don't 4249 * know the rxid and because we did not send the 4250 * ABTS we cannot generate and RRQ. 4251 */ 4252 lpfc_set_rrq_active(phba, ndlp, 4253 lpfc_cmd->cur_iocbq.sli4_lxritag, 4254 0, 0); 4255 } 4256 fallthrough; 4257 case IOSTAT_LOCAL_REJECT: 4258 if (lpfc_cmd->result & IOERR_DRVR_MASK) 4259 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 4260 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR || 4261 lpfc_cmd->result == 4262 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR || 4263 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR || 4264 lpfc_cmd->result == 4265 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) { 4266 cmd->result = DID_NO_CONNECT << 16; 4267 break; 4268 } 4269 if (lpfc_cmd->result == IOERR_INVALID_RPI || 4270 lpfc_cmd->result == IOERR_LINK_DOWN || 4271 lpfc_cmd->result == IOERR_NO_RESOURCES || 4272 lpfc_cmd->result == IOERR_ABORT_REQUESTED || 4273 lpfc_cmd->result == IOERR_RPI_SUSPENDED || 4274 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) { 4275 cmd->result = DID_REQUEUE << 16; 4276 break; 4277 } 4278 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED || 4279 lpfc_cmd->result == IOERR_TX_DMA_FAILED) && 4280 status == CQE_STATUS_DI_ERROR) { 4281 if (scsi_get_prot_op(cmd) != 4282 SCSI_PROT_NORMAL) { 4283 /* 4284 * This is a response for a BG enabled 4285 * cmd. 
Parse BG error 4286 */ 4287 lpfc_parse_bg_err(phba, lpfc_cmd, pwqeOut); 4288 break; 4289 } else { 4290 lpfc_printf_vlog(vport, KERN_WARNING, 4291 LOG_BG, 4292 "9040 non-zero BGSTAT " 4293 "on unprotected cmd\n"); 4294 } 4295 } 4296 lpfc_printf_vlog(vport, KERN_WARNING, logit, 4297 "9036 Local Reject FCP cmd x%x failed" 4298 " <%d/%lld> " 4299 "status: x%x result: x%x " 4300 "sid: x%x did: x%x oxid: x%x " 4301 "Data: x%x x%x x%x\n", 4302 cmd->cmnd[0], 4303 cmd->device ? cmd->device->id : 0xffff, 4304 cmd->device ? cmd->device->lun : 0xffff, 4305 lpfc_cmd->status, lpfc_cmd->result, 4306 vport->fc_myDID, 4307 (ndlp) ? ndlp->nlp_DID : 0, 4308 lpfc_cmd->cur_iocbq.sli4_xritag, 4309 wcqe->parameter, 4310 wcqe->total_data_placed, 4311 lpfc_cmd->cur_iocbq.iocb.ulpIoTag); 4312 fallthrough; 4313 default: 4314 if (lpfc_cmd->status >= IOSTAT_CNT) 4315 lpfc_cmd->status = IOSTAT_DEFAULT; 4316 cmd->result = DID_ERROR << 16; 4317 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 4318 "9037 FCP Completion Error: xri %x " 4319 "status x%x result x%x [x%x] " 4320 "placed x%x\n", 4321 lpfc_cmd->cur_iocbq.sli4_xritag, 4322 lpfc_cmd->status, lpfc_cmd->result, 4323 wcqe->parameter, 4324 wcqe->total_data_placed); 4325 } 4326 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) { 4327 u32 *lp = (u32 *)cmd->sense_buffer; 4328 4329 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 4330 "9039 Iodone <%d/%llu> cmd x%px, error " 4331 "x%x SNS x%x x%x LBA x%llx Data: x%x x%x\n", 4332 cmd->device->id, cmd->device->lun, cmd, 4333 cmd->result, *lp, *(lp + 3), 4334 (u64)scsi_get_lba(cmd), 4335 cmd->retries, scsi_get_resid(cmd)); 4336 } 4337 4338 lpfc_update_stats(vport, lpfc_cmd); 4339 4340 if (vport->cfg_max_scsicmpl_time && 4341 time_after(jiffies, lpfc_cmd->start_time + 4342 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { 4343 spin_lock_irqsave(shost->host_lock, flags); 4344 if (ndlp) { 4345 if (ndlp->cmd_qdepth > 4346 atomic_read(&ndlp->cmd_pending) && 4347 (atomic_read(&ndlp->cmd_pending) > 4348 LPFC_MIN_TGT_QDEPTH) && 4349 (cmd->cmnd[0] == READ_10 || 4350 cmd->cmnd[0] == WRITE_10)) 4351 ndlp->cmd_qdepth = 4352 atomic_read(&ndlp->cmd_pending); 4353 4354 ndlp->last_change_time = jiffies; 4355 } 4356 spin_unlock_irqrestore(shost->host_lock, flags); 4357 } 4358 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 4359 4360 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4361 if (lpfc_cmd->ts_cmd_start) { 4362 lpfc_cmd->ts_isr_cmpl = lpfc_cmd->cur_iocbq.isr_timestamp; 4363 lpfc_cmd->ts_data_io = ktime_get_ns(); 4364 phba->ktime_last_cmd = lpfc_cmd->ts_data_io; 4365 lpfc_io_ktime(phba, lpfc_cmd); 4366 } 4367 #endif 4368 if (likely(!wait_xb_clr)) 4369 lpfc_cmd->pCmd = NULL; 4370 spin_unlock(&lpfc_cmd->buf_lock); 4371 4372 /* Check if IO qualified for CMF */ 4373 if (phba->cmf_active_mode != LPFC_CFG_OFF && 4374 cmd->sc_data_direction == DMA_FROM_DEVICE && 4375 (scsi_sg_count(cmd))) { 4376 /* Used when calculating average latency */ 4377 lat = ktime_get_ns() - lpfc_cmd->rx_cmd_start; 4378 lpfc_update_cmf_cmpl(phba, lat, scsi_bufflen(cmd), shost); 4379 } 4380 4381 if (wait_xb_clr) 4382 goto out; 4383 4384 /* The sdev is not guaranteed to be valid post scsi_done upcall. */ 4385 scsi_done(cmd); 4386 4387 /* 4388 * If there is an abort thread waiting for command completion 4389 * wake up the thread. 
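 * Both the test of lpfc_cmd->waitq and the clearing of
 * LPFC_DRIVER_ABORTED below are done under buf_lock so they stay
 * ordered against an abort handler that may be about to sleep on this
 * command.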
4390 */ 4391 spin_lock(&lpfc_cmd->buf_lock); 4392 lpfc_cmd->cur_iocbq.cmd_flag &= ~LPFC_DRIVER_ABORTED; 4393 if (lpfc_cmd->waitq) 4394 wake_up(lpfc_cmd->waitq); 4395 spin_unlock(&lpfc_cmd->buf_lock); 4396 out: 4397 lpfc_release_scsi_buf(phba, lpfc_cmd); 4398 } 4399 4400 /** 4401 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine 4402 * @phba: The Hba for which this call is being executed. 4403 * @pIocbIn: The command IOCBQ for the scsi cmnd. 4404 * @pIocbOut: The response IOCBQ for the scsi cmnd. 4405 * 4406 * This routine assigns scsi command result by looking into response IOCB 4407 * status field appropriately. This routine handles QUEUE FULL condition as 4408 * well by ramping down device queue depth. 4409 **/ 4410 static void 4411 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, 4412 struct lpfc_iocbq *pIocbOut) 4413 { 4414 struct lpfc_io_buf *lpfc_cmd = 4415 (struct lpfc_io_buf *) pIocbIn->io_buf; 4416 struct lpfc_vport *vport = pIocbIn->vport; 4417 struct lpfc_rport_data *rdata = lpfc_cmd->rdata; 4418 struct lpfc_nodelist *pnode = rdata->pnode; 4419 struct scsi_cmnd *cmd; 4420 unsigned long flags; 4421 struct lpfc_fast_path_event *fast_path_evt; 4422 struct Scsi_Host *shost; 4423 int idx; 4424 uint32_t logit = LOG_FCP; 4425 4426 /* Guard against abort handler being called at same time */ 4427 spin_lock(&lpfc_cmd->buf_lock); 4428 4429 /* Sanity check on return of outstanding command */ 4430 cmd = lpfc_cmd->pCmd; 4431 if (!cmd || !phba) { 4432 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4433 "2621 IO completion: Not an active IO\n"); 4434 spin_unlock(&lpfc_cmd->buf_lock); 4435 return; 4436 } 4437 4438 idx = lpfc_cmd->cur_iocbq.hba_wqidx; 4439 if (phba->sli4_hba.hdwq) 4440 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++; 4441 4442 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4443 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) 4444 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); 4445 #endif 4446 shost = cmd->device->host; 4447 4448 lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK); 4449 lpfc_cmd->status = pIocbOut->iocb.ulpStatus; 4450 /* pick up SLI4 exchange busy status from HBA */ 4451 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; 4452 if (pIocbOut->cmd_flag & LPFC_EXCHANGE_BUSY) 4453 lpfc_cmd->flags |= LPFC_SBUF_XBUSY; 4454 4455 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4456 if (lpfc_cmd->prot_data_type) { 4457 struct scsi_dif_tuple *src = NULL; 4458 4459 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment; 4460 /* 4461 * Used to restore any changes to protection 4462 * data for error injection. 
4463 */ 4464 switch (lpfc_cmd->prot_data_type) { 4465 case LPFC_INJERR_REFTAG: 4466 src->ref_tag = 4467 lpfc_cmd->prot_data; 4468 break; 4469 case LPFC_INJERR_APPTAG: 4470 src->app_tag = 4471 (uint16_t)lpfc_cmd->prot_data; 4472 break; 4473 case LPFC_INJERR_GUARD: 4474 src->guard_tag = 4475 (uint16_t)lpfc_cmd->prot_data; 4476 break; 4477 default: 4478 break; 4479 } 4480 4481 lpfc_cmd->prot_data = 0; 4482 lpfc_cmd->prot_data_type = 0; 4483 lpfc_cmd->prot_data_segment = NULL; 4484 } 4485 #endif 4486 4487 if (unlikely(lpfc_cmd->status)) { 4488 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && 4489 (lpfc_cmd->result & IOERR_DRVR_MASK)) 4490 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 4491 else if (lpfc_cmd->status >= IOSTAT_CNT) 4492 lpfc_cmd->status = IOSTAT_DEFAULT; 4493 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR && 4494 !lpfc_cmd->fcp_rsp->rspStatus3 && 4495 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) && 4496 !(vport->cfg_log_verbose & LOG_FCP_UNDER)) 4497 logit = 0; 4498 else 4499 logit = LOG_FCP | LOG_FCP_UNDER; 4500 lpfc_printf_vlog(vport, KERN_WARNING, logit, 4501 "9030 FCP cmd x%x failed <%d/%lld> " 4502 "status: x%x result: x%x " 4503 "sid: x%x did: x%x oxid: x%x " 4504 "Data: x%x x%x\n", 4505 cmd->cmnd[0], 4506 cmd->device ? cmd->device->id : 0xffff, 4507 cmd->device ? cmd->device->lun : 0xffff, 4508 lpfc_cmd->status, lpfc_cmd->result, 4509 vport->fc_myDID, 4510 (pnode) ? pnode->nlp_DID : 0, 4511 phba->sli_rev == LPFC_SLI_REV4 ? 4512 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff, 4513 pIocbOut->iocb.ulpContext, 4514 lpfc_cmd->cur_iocbq.iocb.ulpIoTag); 4515 4516 switch (lpfc_cmd->status) { 4517 case IOSTAT_FCP_RSP_ERROR: 4518 /* Call FCP RSP handler to determine result */ 4519 lpfc_handle_fcp_err(vport, lpfc_cmd, 4520 pIocbOut->iocb.un.fcpi.fcpi_parm); 4521 break; 4522 case IOSTAT_NPORT_BSY: 4523 case IOSTAT_FABRIC_BSY: 4524 cmd->result = DID_TRANSPORT_DISRUPTED << 16; 4525 fast_path_evt = lpfc_alloc_fast_evt(phba); 4526 if (!fast_path_evt) 4527 break; 4528 fast_path_evt->un.fabric_evt.event_type = 4529 FC_REG_FABRIC_EVENT; 4530 fast_path_evt->un.fabric_evt.subcategory = 4531 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ? 
4532 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY; 4533 if (pnode) { 4534 memcpy(&fast_path_evt->un.fabric_evt.wwpn, 4535 &pnode->nlp_portname, 4536 sizeof(struct lpfc_name)); 4537 memcpy(&fast_path_evt->un.fabric_evt.wwnn, 4538 &pnode->nlp_nodename, 4539 sizeof(struct lpfc_name)); 4540 } 4541 fast_path_evt->vport = vport; 4542 fast_path_evt->work_evt.evt = 4543 LPFC_EVT_FASTPATH_MGMT_EVT; 4544 spin_lock_irqsave(&phba->hbalock, flags); 4545 list_add_tail(&fast_path_evt->work_evt.evt_listp, 4546 &phba->work_list); 4547 spin_unlock_irqrestore(&phba->hbalock, flags); 4548 lpfc_worker_wake_up(phba); 4549 break; 4550 case IOSTAT_LOCAL_REJECT: 4551 case IOSTAT_REMOTE_STOP: 4552 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR || 4553 lpfc_cmd->result == 4554 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR || 4555 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR || 4556 lpfc_cmd->result == 4557 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) { 4558 cmd->result = DID_NO_CONNECT << 16; 4559 break; 4560 } 4561 if (lpfc_cmd->result == IOERR_INVALID_RPI || 4562 lpfc_cmd->result == IOERR_NO_RESOURCES || 4563 lpfc_cmd->result == IOERR_ABORT_REQUESTED || 4564 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) { 4565 cmd->result = DID_REQUEUE << 16; 4566 break; 4567 } 4568 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED || 4569 lpfc_cmd->result == IOERR_TX_DMA_FAILED) && 4570 pIocbOut->iocb.unsli3.sli3_bg.bgstat) { 4571 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { 4572 /* 4573 * This is a response for a BG enabled 4574 * cmd. Parse BG error 4575 */ 4576 lpfc_parse_bg_err(phba, lpfc_cmd, 4577 pIocbOut); 4578 break; 4579 } else { 4580 lpfc_printf_vlog(vport, KERN_WARNING, 4581 LOG_BG, 4582 "9031 non-zero BGSTAT " 4583 "on unprotected cmd\n"); 4584 } 4585 } 4586 if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP) 4587 && (phba->sli_rev == LPFC_SLI_REV4) 4588 && pnode) { 4589 /* This IO was aborted by the target, we don't 4590 * know the rxid and because we did not send the 4591 * ABTS we cannot generate and RRQ. 
4592 */ 4593 lpfc_set_rrq_active(phba, pnode, 4594 lpfc_cmd->cur_iocbq.sli4_lxritag, 4595 0, 0); 4596 } 4597 fallthrough; 4598 default: 4599 cmd->result = DID_ERROR << 16; 4600 break; 4601 } 4602 4603 if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) 4604 cmd->result = DID_TRANSPORT_DISRUPTED << 16 | 4605 SAM_STAT_BUSY; 4606 } else 4607 cmd->result = DID_OK << 16; 4608 4609 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) { 4610 uint32_t *lp = (uint32_t *)cmd->sense_buffer; 4611 4612 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 4613 "0710 Iodone <%d/%llu> cmd x%px, error " 4614 "x%x SNS x%x x%x Data: x%x x%x\n", 4615 cmd->device->id, cmd->device->lun, cmd, 4616 cmd->result, *lp, *(lp + 3), cmd->retries, 4617 scsi_get_resid(cmd)); 4618 } 4619 4620 lpfc_update_stats(vport, lpfc_cmd); 4621 if (vport->cfg_max_scsicmpl_time && 4622 time_after(jiffies, lpfc_cmd->start_time + 4623 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { 4624 spin_lock_irqsave(shost->host_lock, flags); 4625 if (pnode) { 4626 if (pnode->cmd_qdepth > 4627 atomic_read(&pnode->cmd_pending) && 4628 (atomic_read(&pnode->cmd_pending) > 4629 LPFC_MIN_TGT_QDEPTH) && 4630 ((cmd->cmnd[0] == READ_10) || 4631 (cmd->cmnd[0] == WRITE_10))) 4632 pnode->cmd_qdepth = 4633 atomic_read(&pnode->cmd_pending); 4634 4635 pnode->last_change_time = jiffies; 4636 } 4637 spin_unlock_irqrestore(shost->host_lock, flags); 4638 } 4639 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 4640 4641 lpfc_cmd->pCmd = NULL; 4642 spin_unlock(&lpfc_cmd->buf_lock); 4643 4644 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4645 if (lpfc_cmd->ts_cmd_start) { 4646 lpfc_cmd->ts_isr_cmpl = pIocbIn->isr_timestamp; 4647 lpfc_cmd->ts_data_io = ktime_get_ns(); 4648 phba->ktime_last_cmd = lpfc_cmd->ts_data_io; 4649 lpfc_io_ktime(phba, lpfc_cmd); 4650 } 4651 #endif 4652 4653 /* The sdev is not guaranteed to be valid post scsi_done upcall. */ 4654 scsi_done(cmd); 4655 4656 /* 4657 * If there is an abort thread waiting for command completion 4658 * wake up the thread. 4659 */ 4660 spin_lock(&lpfc_cmd->buf_lock); 4661 lpfc_cmd->cur_iocbq.cmd_flag &= ~LPFC_DRIVER_ABORTED; 4662 if (lpfc_cmd->waitq) 4663 wake_up(lpfc_cmd->waitq); 4664 spin_unlock(&lpfc_cmd->buf_lock); 4665 4666 lpfc_release_scsi_buf(phba, lpfc_cmd); 4667 } 4668 4669 /** 4670 * lpfc_scsi_prep_cmnd_buf_s3 - SLI-3 IOCB init for the IO 4671 * @vport: Pointer to vport object. 4672 * @lpfc_cmd: The scsi buffer which is going to be prep'ed. 4673 * @tmo: timeout value for the IO 4674 * 4675 * Based on the data-direction of the command, initialize IOCB 4676 * in the I/O buffer. Fill in the IOCB fields which are independent 4677 * of the scsi buffer 4678 * 4679 * RETURNS 0 - SUCCESS, 4680 **/ 4681 static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport, 4682 struct lpfc_io_buf *lpfc_cmd, 4683 uint8_t tmo) 4684 { 4685 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 4686 struct lpfc_iocbq *piocbq = &lpfc_cmd->cur_iocbq; 4687 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 4688 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 4689 struct lpfc_nodelist *pnode = lpfc_cmd->ndlp; 4690 int datadir = scsi_cmnd->sc_data_direction; 4691 u32 fcpdl; 4692 4693 piocbq->iocb.un.fcpi.fcpi_XRdy = 0; 4694 4695 /* 4696 * There are three possibilities here - use scatter-gather segment, use 4697 * the single mapping, or neither. Start the lpfc command prep by 4698 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first 4699 * data bde entry. 
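 * In practice that means DMA_TO_DEVICE becomes an FCP_IWRITE64,
 * DMA_FROM_DEVICE becomes an FCP_IREAD64, and a command with no data
 * phase is sent as an FCP_ICMND64.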
4700 */ 4701 if (scsi_sg_count(scsi_cmnd)) { 4702 if (datadir == DMA_TO_DEVICE) { 4703 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; 4704 iocb_cmd->ulpPU = PARM_READ_CHECK; 4705 if (vport->cfg_first_burst_size && 4706 (pnode->nlp_flag & NLP_FIRSTBURST)) { 4707 u32 xrdy_len; 4708 4709 fcpdl = scsi_bufflen(scsi_cmnd); 4710 xrdy_len = min(fcpdl, 4711 vport->cfg_first_burst_size); 4712 piocbq->iocb.un.fcpi.fcpi_XRdy = xrdy_len; 4713 } 4714 fcp_cmnd->fcpCntl3 = WRITE_DATA; 4715 } else { 4716 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; 4717 iocb_cmd->ulpPU = PARM_READ_CHECK; 4718 fcp_cmnd->fcpCntl3 = READ_DATA; 4719 } 4720 } else { 4721 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR; 4722 iocb_cmd->un.fcpi.fcpi_parm = 0; 4723 iocb_cmd->ulpPU = 0; 4724 fcp_cmnd->fcpCntl3 = 0; 4725 } 4726 4727 /* 4728 * Finish initializing those IOCB fields that are independent 4729 * of the scsi_cmnd request_buffer 4730 */ 4731 piocbq->iocb.ulpContext = pnode->nlp_rpi; 4732 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) 4733 piocbq->iocb.ulpFCP2Rcvy = 1; 4734 else 4735 piocbq->iocb.ulpFCP2Rcvy = 0; 4736 4737 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f); 4738 piocbq->io_buf = lpfc_cmd; 4739 if (!piocbq->cmd_cmpl) 4740 piocbq->cmd_cmpl = lpfc_scsi_cmd_iocb_cmpl; 4741 piocbq->iocb.ulpTimeout = tmo; 4742 piocbq->vport = vport; 4743 return 0; 4744 } 4745 4746 /** 4747 * lpfc_scsi_prep_cmnd_buf_s4 - SLI-4 WQE init for the IO 4748 * @vport: Pointer to vport object. 4749 * @lpfc_cmd: The scsi buffer which is going to be prep'ed. 4750 * @tmo: timeout value for the IO 4751 * 4752 * Based on the data-direction of the command copy WQE template 4753 * to I/O buffer WQE. Fill in the WQE fields which are independent 4754 * of the scsi buffer 4755 * 4756 * RETURNS 0 - SUCCESS, 4757 **/ 4758 static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport, 4759 struct lpfc_io_buf *lpfc_cmd, 4760 uint8_t tmo) 4761 { 4762 struct lpfc_hba *phba = vport->phba; 4763 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 4764 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 4765 struct lpfc_sli4_hdw_queue *hdwq = NULL; 4766 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; 4767 struct lpfc_nodelist *pnode = lpfc_cmd->ndlp; 4768 union lpfc_wqe128 *wqe = &pwqeq->wqe; 4769 u16 idx = lpfc_cmd->hdwq_no; 4770 int datadir = scsi_cmnd->sc_data_direction; 4771 4772 hdwq = &phba->sli4_hba.hdwq[idx]; 4773 4774 /* Initialize 64 bytes only */ 4775 memset(wqe, 0, sizeof(union lpfc_wqe128)); 4776 4777 /* 4778 * There are three possibilities here - use scatter-gather segment, use 4779 * the single mapping, or neither. 
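 * The corresponding iwrite/iread/icmnd WQE template is copied in below
 * and only the command specific words are filled in afterwards.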
4780 */ 4781 if (scsi_sg_count(scsi_cmnd)) { 4782 if (datadir == DMA_TO_DEVICE) { 4783 /* From the iwrite template, initialize words 7 - 11 */ 4784 memcpy(&wqe->words[7], 4785 &lpfc_iwrite_cmd_template.words[7], 4786 sizeof(uint32_t) * 5); 4787 4788 fcp_cmnd->fcpCntl3 = WRITE_DATA; 4789 if (hdwq) 4790 hdwq->scsi_cstat.output_requests++; 4791 } else { 4792 /* From the iread template, initialize words 7 - 11 */ 4793 memcpy(&wqe->words[7], 4794 &lpfc_iread_cmd_template.words[7], 4795 sizeof(uint32_t) * 5); 4796 4797 /* Word 7 */ 4798 bf_set(wqe_tmo, &wqe->fcp_iread.wqe_com, tmo); 4799 4800 fcp_cmnd->fcpCntl3 = READ_DATA; 4801 if (hdwq) 4802 hdwq->scsi_cstat.input_requests++; 4803 4804 /* For a CMF Managed port, iod must be zero'ed */ 4805 if (phba->cmf_active_mode == LPFC_CFG_MANAGED) 4806 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, 4807 LPFC_WQE_IOD_NONE); 4808 } 4809 } else { 4810 /* From the icmnd template, initialize words 4 - 11 */ 4811 memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4], 4812 sizeof(uint32_t) * 8); 4813 4814 /* Word 7 */ 4815 bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, tmo); 4816 4817 fcp_cmnd->fcpCntl3 = 0; 4818 if (hdwq) 4819 hdwq->scsi_cstat.control_requests++; 4820 } 4821 4822 /* 4823 * Finish initializing those WQE fields that are independent 4824 * of the request_buffer 4825 */ 4826 4827 /* Word 3 */ 4828 bf_set(payload_offset_len, &wqe->fcp_icmd, 4829 sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); 4830 4831 /* Word 6 */ 4832 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 4833 phba->sli4_hba.rpi_ids[pnode->nlp_rpi]); 4834 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag); 4835 4836 /* Word 7*/ 4837 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) 4838 bf_set(wqe_erp, &wqe->generic.wqe_com, 1); 4839 4840 bf_set(wqe_class, &wqe->generic.wqe_com, 4841 (pnode->nlp_fcp_info & 0x0f)); 4842 4843 /* Word 8 */ 4844 wqe->generic.wqe_com.abort_tag = pwqeq->iotag; 4845 4846 /* Word 9 */ 4847 bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag); 4848 4849 pwqeq->vport = vport; 4850 pwqeq->io_buf = lpfc_cmd; 4851 pwqeq->hba_wqidx = lpfc_cmd->hdwq_no; 4852 pwqeq->cmd_cmpl = lpfc_fcp_io_cmd_wqe_cmpl; 4853 4854 return 0; 4855 } 4856 4857 /** 4858 * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit 4859 * @vport: The virtual port for which this call is being executed. 4860 * @lpfc_cmd: The scsi command which needs to send. 4861 * @pnode: Pointer to lpfc_nodelist. 4862 * 4863 * This routine initializes fcp_cmnd and iocb data structure from scsi command 4864 * to transfer for device with SLI3 interface spec. 
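 * The routine clears the task management field, copies the CDB into the
 * FCP_CMND payload (zero padding it out to LPFC_FCP_CDB_LEN), sets a
 * SIMPLE queue tag and then hands the buffer to the SLI specific
 * lpfc_scsi_prep_cmnd_buf routine.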
4865 **/ 4866 static int 4867 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, 4868 struct lpfc_nodelist *pnode) 4869 { 4870 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 4871 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 4872 u8 *ptr; 4873 4874 if (!pnode) 4875 return 0; 4876 4877 lpfc_cmd->fcp_rsp->rspSnsLen = 0; 4878 /* clear task management bits */ 4879 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0; 4880 4881 int_to_scsilun(lpfc_cmd->pCmd->device->lun, 4882 &lpfc_cmd->fcp_cmnd->fcp_lun); 4883 4884 ptr = &fcp_cmnd->fcpCdb[0]; 4885 memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len); 4886 if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) { 4887 ptr += scsi_cmnd->cmd_len; 4888 memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len)); 4889 } 4890 4891 fcp_cmnd->fcpCntl1 = SIMPLE_Q; 4892 4893 lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, lpfc_cmd->timeout); 4894 4895 return 0; 4896 } 4897 4898 /** 4899 * lpfc_scsi_prep_task_mgmt_cmd_s3 - Convert SLI3 scsi TM cmd to FCP info unit 4900 * @vport: The virtual port for which this call is being executed. 4901 * @lpfc_cmd: Pointer to lpfc_io_buf data structure. 4902 * @lun: Logical unit number. 4903 * @task_mgmt_cmd: SCSI task management command. 4904 * 4905 * This routine creates FCP information unit corresponding to @task_mgmt_cmd 4906 * for device with SLI-3 interface spec. 4907 * 4908 * Return codes: 4909 * 0 - Error 4910 * 1 - Success 4911 **/ 4912 static int 4913 lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport, 4914 struct lpfc_io_buf *lpfc_cmd, 4915 u64 lun, u8 task_mgmt_cmd) 4916 { 4917 struct lpfc_iocbq *piocbq; 4918 IOCB_t *piocb; 4919 struct fcp_cmnd *fcp_cmnd; 4920 struct lpfc_rport_data *rdata = lpfc_cmd->rdata; 4921 struct lpfc_nodelist *ndlp = rdata->pnode; 4922 4923 if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE) 4924 return 0; 4925 4926 piocbq = &(lpfc_cmd->cur_iocbq); 4927 piocbq->vport = vport; 4928 4929 piocb = &piocbq->iocb; 4930 4931 fcp_cmnd = lpfc_cmd->fcp_cmnd; 4932 /* Clear out any old data in the FCP command area */ 4933 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); 4934 int_to_scsilun(lun, &fcp_cmnd->fcp_lun); 4935 fcp_cmnd->fcpCntl2 = task_mgmt_cmd; 4936 if (!(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED)) 4937 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd); 4938 piocb->ulpCommand = CMD_FCP_ICMND64_CR; 4939 piocb->ulpContext = ndlp->nlp_rpi; 4940 piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0; 4941 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f); 4942 piocb->ulpPU = 0; 4943 piocb->un.fcpi.fcpi_parm = 0; 4944 4945 /* ulpTimeout is only one byte */ 4946 if (lpfc_cmd->timeout > 0xff) { 4947 /* 4948 * Do not timeout the command at the firmware level. 4949 * The driver will provide the timeout mechanism. 4950 */ 4951 piocb->ulpTimeout = 0; 4952 } else 4953 piocb->ulpTimeout = lpfc_cmd->timeout; 4954 4955 return 1; 4956 } 4957 4958 /** 4959 * lpfc_scsi_prep_task_mgmt_cmd_s4 - Convert SLI4 scsi TM cmd to FCP info unit 4960 * @vport: The virtual port for which this call is being executed. 4961 * @lpfc_cmd: Pointer to lpfc_io_buf data structure. 4962 * @lun: Logical unit number. 4963 * @task_mgmt_cmd: SCSI task management command. 4964 * 4965 * This routine creates FCP information unit corresponding to @task_mgmt_cmd 4966 * for device with SLI-4 interface spec. 
4967 * 4968 * Return codes: 4969 * 0 - Error 4970 * 1 - Success 4971 **/ 4972 static int 4973 lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport, 4974 struct lpfc_io_buf *lpfc_cmd, 4975 u64 lun, u8 task_mgmt_cmd) 4976 { 4977 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; 4978 union lpfc_wqe128 *wqe = &pwqeq->wqe; 4979 struct fcp_cmnd *fcp_cmnd; 4980 struct lpfc_rport_data *rdata = lpfc_cmd->rdata; 4981 struct lpfc_nodelist *ndlp = rdata->pnode; 4982 4983 if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE) 4984 return 0; 4985 4986 pwqeq->vport = vport; 4987 /* Initialize 64 bytes only */ 4988 memset(wqe, 0, sizeof(union lpfc_wqe128)); 4989 4990 /* From the icmnd template, initialize words 4 - 11 */ 4991 memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4], 4992 sizeof(uint32_t) * 8); 4993 4994 fcp_cmnd = lpfc_cmd->fcp_cmnd; 4995 /* Clear out any old data in the FCP command area */ 4996 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); 4997 int_to_scsilun(lun, &fcp_cmnd->fcp_lun); 4998 fcp_cmnd->fcpCntl3 = 0; 4999 fcp_cmnd->fcpCntl2 = task_mgmt_cmd; 5000 5001 bf_set(payload_offset_len, &wqe->fcp_icmd, 5002 sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); 5003 bf_set(cmd_buff_len, &wqe->fcp_icmd, 0); 5004 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, /* ulpContext */ 5005 vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 5006 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, 5007 ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0)); 5008 bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, 5009 (ndlp->nlp_fcp_info & 0x0f)); 5010 5011 /* ulpTimeout is only one byte */ 5012 if (lpfc_cmd->timeout > 0xff) { 5013 /* 5014 * Do not timeout the command at the firmware level. 5015 * The driver will provide the timeout mechanism. 5016 */ 5017 bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, 0); 5018 } else { 5019 bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, lpfc_cmd->timeout); 5020 } 5021 5022 lpfc_prep_embed_io(vport->phba, lpfc_cmd); 5023 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag); 5024 wqe->generic.wqe_com.abort_tag = pwqeq->iotag; 5025 bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag); 5026 5027 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd); 5028 5029 return 1; 5030 } 5031 5032 /** 5033 * lpfc_scsi_api_table_setup - Set up scsi api function jump table 5034 * @phba: The hba struct for which this call is being executed. 5035 * @dev_grp: The HBA PCI-Device group number. 5036 * 5037 * This routine sets up the SCSI interface API function jump table in @phba 5038 * struct. 5039 * Returns: 0 - success, -ENODEV - failure. 
5040 **/ 5041 int 5042 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 5043 { 5044 5045 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf; 5046 5047 switch (dev_grp) { 5048 case LPFC_PCI_DEV_LP: 5049 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3; 5050 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3; 5051 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3; 5052 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3; 5053 phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s3; 5054 phba->lpfc_scsi_prep_task_mgmt_cmd = 5055 lpfc_scsi_prep_task_mgmt_cmd_s3; 5056 break; 5057 case LPFC_PCI_DEV_OC: 5058 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4; 5059 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4; 5060 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4; 5061 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4; 5062 phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4; 5063 phba->lpfc_scsi_prep_task_mgmt_cmd = 5064 lpfc_scsi_prep_task_mgmt_cmd_s4; 5065 break; 5066 default: 5067 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5068 "1418 Invalid HBA PCI-device group: 0x%x\n", 5069 dev_grp); 5070 return -ENODEV; 5071 } 5072 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth; 5073 phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl; 5074 return 0; 5075 } 5076 5077 /** 5078 * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command 5079 * @phba: The Hba for which this call is being executed. 5080 * @cmdiocbq: Pointer to lpfc_iocbq data structure. 5081 * @rspiocbq: Pointer to lpfc_iocbq data structure. 5082 * 5083 * This routine is the IOCB completion routine for device reset and target 5084 * reset commands. It releases the scsi buffer associated with lpfc_cmd. 5085 **/ 5086 static void 5087 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba, 5088 struct lpfc_iocbq *cmdiocbq, 5089 struct lpfc_iocbq *rspiocbq) 5090 { 5091 struct lpfc_io_buf *lpfc_cmd = cmdiocbq->io_buf; 5092 if (lpfc_cmd) 5093 lpfc_release_scsi_buf(phba, lpfc_cmd); 5094 return; 5095 } 5096 5097 /** 5098 * lpfc_check_pci_resettable - Walks list of devices on pci_dev's bus to check 5099 * if issuing a pci_bus_reset is possibly unsafe 5100 * @phba: lpfc_hba pointer. 5101 * 5102 * Description: 5103 * Walks the bus_list to ensure that only PCI devices with an Emulex 5104 * vendor id, device ids that support hot reset, and a single occurrence 5105 * of function 0 are present.
5106 * 5107 * Returns: 5108 * -EBADSLT, detected invalid device 5109 * 0, successful 5110 */ 5111 int 5112 lpfc_check_pci_resettable(struct lpfc_hba *phba) 5113 { 5114 const struct pci_dev *pdev = phba->pcidev; 5115 struct pci_dev *ptr = NULL; 5116 u8 counter = 0; 5117 5118 /* Walk the list of devices on the pci_dev's bus */ 5119 list_for_each_entry(ptr, &pdev->bus->devices, bus_list) { 5120 /* Check for Emulex Vendor ID */ 5121 if (ptr->vendor != PCI_VENDOR_ID_EMULEX) { 5122 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5123 "8346 Non-Emulex vendor found: " 5124 "0x%04x\n", ptr->vendor); 5125 return -EBADSLT; 5126 } 5127 5128 /* Check for valid Emulex Device ID */ 5129 if (phba->sli_rev != LPFC_SLI_REV4 || 5130 phba->hba_flag & HBA_FCOE_MODE) { 5131 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5132 "8347 Incapable PCI reset device: " 5133 "0x%04x\n", ptr->device); 5134 return -EBADSLT; 5135 } 5136 5137 /* Check for only one function 0 ID to ensure only one HBA on 5138 * secondary bus 5139 */ 5140 if (ptr->devfn == 0) { 5141 if (++counter > 1) { 5142 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5143 "8348 More than one device on " 5144 "secondary bus found\n"); 5145 return -EBADSLT; 5146 } 5147 } 5148 } 5149 5150 return 0; 5151 } 5152 5153 /** 5154 * lpfc_info - Info entry point of scsi_host_template data structure 5155 * @host: The scsi host for which this call is being executed. 5156 * 5157 * This routine provides module information about hba. 5158 * 5159 * Reutrn code: 5160 * Pointer to char - Success. 5161 **/ 5162 const char * 5163 lpfc_info(struct Scsi_Host *host) 5164 { 5165 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata; 5166 struct lpfc_hba *phba = vport->phba; 5167 int link_speed = 0; 5168 static char lpfcinfobuf[384]; 5169 char tmp[384] = {0}; 5170 5171 memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf)); 5172 if (phba && phba->pcidev){ 5173 /* Model Description */ 5174 scnprintf(tmp, sizeof(tmp), phba->ModelDesc); 5175 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >= 5176 sizeof(lpfcinfobuf)) 5177 goto buffer_done; 5178 5179 /* PCI Info */ 5180 scnprintf(tmp, sizeof(tmp), 5181 " on PCI bus %02x device %02x irq %d", 5182 phba->pcidev->bus->number, phba->pcidev->devfn, 5183 phba->pcidev->irq); 5184 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >= 5185 sizeof(lpfcinfobuf)) 5186 goto buffer_done; 5187 5188 /* Port Number */ 5189 if (phba->Port[0]) { 5190 scnprintf(tmp, sizeof(tmp), " port %s", phba->Port); 5191 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >= 5192 sizeof(lpfcinfobuf)) 5193 goto buffer_done; 5194 } 5195 5196 /* Link Speed */ 5197 link_speed = lpfc_sli_port_speed_get(phba); 5198 if (link_speed != 0) { 5199 scnprintf(tmp, sizeof(tmp), 5200 " Logical Link Speed: %d Mbps", link_speed); 5201 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >= 5202 sizeof(lpfcinfobuf)) 5203 goto buffer_done; 5204 } 5205 5206 /* PCI resettable */ 5207 if (!lpfc_check_pci_resettable(phba)) { 5208 scnprintf(tmp, sizeof(tmp), " PCI resettable"); 5209 strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)); 5210 } 5211 } 5212 5213 buffer_done: 5214 return lpfcinfobuf; 5215 } 5216 5217 /** 5218 * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba 5219 * @phba: The Hba for which this call is being executed. 5220 * 5221 * This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo. 5222 * The default value of cfg_poll_tmo is 10 milliseconds. 
5223 **/ 5224 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba) 5225 { 5226 unsigned long poll_tmo_expires = 5227 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo)); 5228 5229 if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq)) 5230 mod_timer(&phba->fcp_poll_timer, 5231 poll_tmo_expires); 5232 } 5233 5234 /** 5235 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA 5236 * @phba: The Hba for which this call is being executed. 5237 * 5238 * This routine starts the fcp_poll_timer of @phba. 5239 **/ 5240 void lpfc_poll_start_timer(struct lpfc_hba * phba) 5241 { 5242 lpfc_poll_rearm_timer(phba); 5243 } 5244 5245 /** 5246 * lpfc_poll_timeout - Restart polling timer 5247 * @t: Timer construct where lpfc_hba data structure pointer is obtained. 5248 * 5249 * This routine restarts the fcp_poll timer when FCP ring polling is enabled 5250 * and the FCP ring interrupt is disabled. 5251 **/ 5252 void lpfc_poll_timeout(struct timer_list *t) 5253 { 5254 struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer); 5255 5256 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 5257 lpfc_sli_handle_fast_ring_event(phba, 5258 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); 5259 5260 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 5261 lpfc_poll_rearm_timer(phba); 5262 } 5263 } 5264 5265 /* 5266 * lpfc_is_command_vm_io - get the UUID from blk cgroup 5267 * @cmd: Pointer to scsi_cmnd data structure 5268 * Returns UUID if present, otherwise NULL 5269 */ 5270 static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd) 5271 { 5272 struct bio *bio = scsi_cmd_to_rq(cmd)->bio; 5273 5274 if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !bio) 5275 return NULL; 5276 return blkcg_get_fc_appid(bio); 5277 } 5278 5279 /** 5280 * lpfc_queuecommand - scsi_host_template queuecommand entry point 5281 * @shost: kernel scsi host pointer. 5282 * @cmnd: Pointer to scsi_cmnd data structure. 5283 * 5284 * The driver registers this routine with the scsi midlayer to submit a @cmnd for processing. 5285 * This routine prepares an IOCB from the scsi command and submits it to the firmware. 5286 * scsi_done() is invoked once the driver has finished processing the command. 5287 * 5288 * Return value: 5289 * 0 - Success 5290 * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
5291 **/ 5292 static int 5293 lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) 5294 { 5295 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 5296 struct lpfc_hba *phba = vport->phba; 5297 struct lpfc_iocbq *cur_iocbq = NULL; 5298 struct lpfc_rport_data *rdata; 5299 struct lpfc_nodelist *ndlp; 5300 struct lpfc_io_buf *lpfc_cmd; 5301 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 5302 int err, idx; 5303 u8 *uuid = NULL; 5304 uint64_t start; 5305 5306 start = ktime_get_ns(); 5307 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 5308 5309 /* sanity check on references */ 5310 if (unlikely(!rdata) || unlikely(!rport)) 5311 goto out_fail_command; 5312 5313 err = fc_remote_port_chkready(rport); 5314 if (err) { 5315 cmnd->result = err; 5316 goto out_fail_command; 5317 } 5318 ndlp = rdata->pnode; 5319 5320 if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) && 5321 (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) { 5322 5323 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5324 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x" 5325 " op:%02x str=%s without registering for" 5326 " BlockGuard - Rejecting command\n", 5327 cmnd->cmnd[0], scsi_get_prot_op(cmnd), 5328 dif_op_str[scsi_get_prot_op(cmnd)]); 5329 goto out_fail_command; 5330 } 5331 5332 /* 5333 * Catch race where our node has transitioned, but the 5334 * transport is still transitioning. 5335 */ 5336 if (!ndlp) 5337 goto out_tgt_busy1; 5338 5339 /* Check if IO qualifies for CMF */ 5340 if (phba->cmf_active_mode != LPFC_CFG_OFF && 5341 cmnd->sc_data_direction == DMA_FROM_DEVICE && 5342 (scsi_sg_count(cmnd))) { 5343 /* Latency start time saved in rx_cmd_start later in routine */ 5344 err = lpfc_update_cmf_cmd(phba, scsi_bufflen(cmnd)); 5345 if (err) 5346 goto out_tgt_busy1; 5347 } 5348 5349 if (lpfc_ndlp_check_qdepth(phba, ndlp)) { 5350 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) { 5351 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR, 5352 "3377 Target Queue Full, scsi Id:%d " 5353 "Qdepth:%d Pending command:%d" 5354 " WWNN:%02x:%02x:%02x:%02x:" 5355 "%02x:%02x:%02x:%02x, " 5356 " WWPN:%02x:%02x:%02x:%02x:" 5357 "%02x:%02x:%02x:%02x", 5358 ndlp->nlp_sid, ndlp->cmd_qdepth, 5359 atomic_read(&ndlp->cmd_pending), 5360 ndlp->nlp_nodename.u.wwn[0], 5361 ndlp->nlp_nodename.u.wwn[1], 5362 ndlp->nlp_nodename.u.wwn[2], 5363 ndlp->nlp_nodename.u.wwn[3], 5364 ndlp->nlp_nodename.u.wwn[4], 5365 ndlp->nlp_nodename.u.wwn[5], 5366 ndlp->nlp_nodename.u.wwn[6], 5367 ndlp->nlp_nodename.u.wwn[7], 5368 ndlp->nlp_portname.u.wwn[0], 5369 ndlp->nlp_portname.u.wwn[1], 5370 ndlp->nlp_portname.u.wwn[2], 5371 ndlp->nlp_portname.u.wwn[3], 5372 ndlp->nlp_portname.u.wwn[4], 5373 ndlp->nlp_portname.u.wwn[5], 5374 ndlp->nlp_portname.u.wwn[6], 5375 ndlp->nlp_portname.u.wwn[7]); 5376 goto out_tgt_busy2; 5377 } 5378 } 5379 5380 lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd); 5381 if (lpfc_cmd == NULL) { 5382 lpfc_rampdown_queue_depth(phba); 5383 5384 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR, 5385 "0707 driver's buffer pool is empty, " 5386 "IO busied\n"); 5387 goto out_host_busy; 5388 } 5389 lpfc_cmd->rx_cmd_start = start; 5390 5391 cur_iocbq = &lpfc_cmd->cur_iocbq; 5392 /* 5393 * Store the midlayer's command structure for the completion phase 5394 * and complete the command initialization. 
5395 */ 5396 lpfc_cmd->pCmd = cmnd; 5397 lpfc_cmd->rdata = rdata; 5398 lpfc_cmd->ndlp = ndlp; 5399 cur_iocbq->cmd_cmpl = NULL; 5400 cmnd->host_scribble = (unsigned char *)lpfc_cmd; 5401 5402 err = lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); 5403 if (err) 5404 goto out_host_busy_release_buf; 5405 5406 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { 5407 if (vport->phba->cfg_enable_bg) { 5408 lpfc_printf_vlog(vport, 5409 KERN_INFO, LOG_SCSI_CMD, 5410 "9033 BLKGRD: rcvd %s cmd:x%x " 5411 "reftag x%x cnt %u pt %x\n", 5412 dif_op_str[scsi_get_prot_op(cmnd)], 5413 cmnd->cmnd[0], 5414 scsi_prot_ref_tag(cmnd), 5415 scsi_logical_block_count(cmnd), 5416 (cmnd->cmnd[1]>>5)); 5417 } 5418 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 5419 } else { 5420 if (vport->phba->cfg_enable_bg) { 5421 lpfc_printf_vlog(vport, 5422 KERN_INFO, LOG_SCSI_CMD, 5423 "9038 BLKGRD: rcvd PROT_NORMAL cmd: " 5424 "x%x reftag x%x cnt %u pt %x\n", 5425 cmnd->cmnd[0], 5426 scsi_prot_ref_tag(cmnd), 5427 scsi_logical_block_count(cmnd), 5428 (cmnd->cmnd[1]>>5)); 5429 } 5430 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); 5431 } 5432 5433 if (unlikely(err)) { 5434 if (err == 2) { 5435 cmnd->result = DID_ERROR << 16; 5436 goto out_fail_command_release_buf; 5437 } 5438 goto out_host_busy_free_buf; 5439 } 5440 5441 /* check the necessary and sufficient condition to support VMID */ 5442 if (lpfc_is_vmid_enabled(phba) && 5443 (ndlp->vmid_support || 5444 phba->pport->vmid_priority_tagging == 5445 LPFC_VMID_PRIO_TAG_ALL_TARGETS)) { 5446 /* is the I/O generated by a VM, get the associated virtual */ 5447 /* entity id */ 5448 uuid = lpfc_is_command_vm_io(cmnd); 5449 5450 if (uuid) { 5451 err = lpfc_vmid_get_appid(vport, uuid, 5452 cmnd->sc_data_direction, 5453 (union lpfc_vmid_io_tag *) 5454 &cur_iocbq->vmid_tag); 5455 if (!err) 5456 cur_iocbq->cmd_flag |= LPFC_IO_VMID; 5457 } 5458 } 5459 atomic_inc(&ndlp->cmd_pending); 5460 5461 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 5462 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) 5463 this_cpu_inc(phba->sli4_hba.c_stat->xmt_io); 5464 #endif 5465 /* Issue I/O to adapter */ 5466 err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING, cur_iocbq, 5467 SLI_IOCB_RET_IOCB); 5468 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 5469 if (start) { 5470 lpfc_cmd->ts_cmd_start = start; 5471 lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd; 5472 lpfc_cmd->ts_cmd_wqput = ktime_get_ns(); 5473 } else { 5474 lpfc_cmd->ts_cmd_start = 0; 5475 } 5476 #endif 5477 if (err) { 5478 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5479 "3376 FCP could not issue iocb err %x " 5480 "FCP cmd x%x <%d/%llu> " 5481 "sid: x%x did: x%x oxid: x%x " 5482 "Data: x%x x%x x%x x%x\n", 5483 err, cmnd->cmnd[0], 5484 cmnd->device ? cmnd->device->id : 0xffff, 5485 cmnd->device ? cmnd->device->lun : (u64)-1, 5486 vport->fc_myDID, ndlp->nlp_DID, 5487 phba->sli_rev == LPFC_SLI_REV4 ? 5488 cur_iocbq->sli4_xritag : 0xffff, 5489 phba->sli_rev == LPFC_SLI_REV4 ? 5490 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] : 5491 cur_iocbq->iocb.ulpContext, 5492 cur_iocbq->iotag, 5493 phba->sli_rev == LPFC_SLI_REV4 ? 
5494 bf_get(wqe_tmo, 5495 &cur_iocbq->wqe.generic.wqe_com) : 5496 cur_iocbq->iocb.ulpTimeout, 5497 (uint32_t)(scsi_cmd_to_rq(cmnd)->timeout / 1000)); 5498 5499 goto out_host_busy_free_buf; 5500 } 5501 5502 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 5503 lpfc_sli_handle_fast_ring_event(phba, 5504 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); 5505 5506 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 5507 lpfc_poll_rearm_timer(phba); 5508 } 5509 5510 if (phba->cfg_xri_rebalancing) 5511 lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no); 5512 5513 return 0; 5514 5515 out_host_busy_free_buf: 5516 idx = lpfc_cmd->hdwq_no; 5517 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 5518 if (phba->sli4_hba.hdwq) { 5519 switch (lpfc_cmd->fcp_cmnd->fcpCntl3) { 5520 case WRITE_DATA: 5521 phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--; 5522 break; 5523 case READ_DATA: 5524 phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--; 5525 break; 5526 default: 5527 phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--; 5528 } 5529 } 5530 out_host_busy_release_buf: 5531 lpfc_release_scsi_buf(phba, lpfc_cmd); 5532 out_host_busy: 5533 lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd), 5534 shost); 5535 return SCSI_MLQUEUE_HOST_BUSY; 5536 5537 out_tgt_busy2: 5538 lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd), 5539 shost); 5540 out_tgt_busy1: 5541 return SCSI_MLQUEUE_TARGET_BUSY; 5542 5543 out_fail_command_release_buf: 5544 lpfc_release_scsi_buf(phba, lpfc_cmd); 5545 lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd), 5546 shost); 5547 5548 out_fail_command: 5549 scsi_done(cmnd); 5550 return 0; 5551 } 5552 5553 /* 5554 * lpfc_vmid_vport_cleanup - cleans up the resources associated with a vport 5555 * @vport: The virtual port for which this call is being executed. 5556 */ 5557 void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport) 5558 { 5559 u32 bucket; 5560 struct lpfc_vmid *cur; 5561 5562 if (vport->port_type == LPFC_PHYSICAL_PORT) 5563 del_timer_sync(&vport->phba->inactive_vmid_poll); 5564 5565 kfree(vport->qfpa_res); 5566 kfree(vport->vmid_priority.vmid_range); 5567 kfree(vport->vmid); 5568 5569 if (!hash_empty(vport->hash_table)) 5570 hash_for_each(vport->hash_table, bucket, cur, hnode) 5571 hash_del(&cur->hnode); 5572 5573 vport->qfpa_res = NULL; 5574 vport->vmid_priority.vmid_range = NULL; 5575 vport->vmid = NULL; 5576 vport->cur_vmid_cnt = 0; 5577 } 5578 5579 /** 5580 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point 5581 * @cmnd: Pointer to scsi_cmnd data structure. 5582 * 5583 * This routine aborts @cmnd pending in base driver. 
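* The command is located through cmnd->host_scribble; under the buf_lock,
* hbalock and (on SLI-4) the ring lock, the handler verifies the I/O is
* still owned by this command and still on the txcmplq, issues an abort for
* its iotag, and then waits (bounded by twice the devloss timeout) for the
* aborted I/O to complete.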
5584 * 5585 * Return code : 5586 * 0x2003 - Error 5587 * 0x2002 - Success 5588 **/ 5589 static int 5590 lpfc_abort_handler(struct scsi_cmnd *cmnd) 5591 { 5592 struct Scsi_Host *shost = cmnd->device->host; 5593 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 5594 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 5595 struct lpfc_hba *phba = vport->phba; 5596 struct lpfc_iocbq *iocb; 5597 struct lpfc_io_buf *lpfc_cmd; 5598 int ret = SUCCESS, status = 0; 5599 struct lpfc_sli_ring *pring_s4 = NULL; 5600 struct lpfc_sli_ring *pring = NULL; 5601 int ret_val; 5602 unsigned long flags; 5603 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); 5604 5605 status = fc_block_rport(rport); 5606 if (status != 0 && status != SUCCESS) 5607 return status; 5608 5609 lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble; 5610 if (!lpfc_cmd) 5611 return ret; 5612 5613 /* Guard against IO completion being called at same time */ 5614 spin_lock_irqsave(&lpfc_cmd->buf_lock, flags); 5615 5616 spin_lock(&phba->hbalock); 5617 /* driver queued commands are in process of being flushed */ 5618 if (phba->hba_flag & HBA_IOQ_FLUSH) { 5619 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5620 "3168 SCSI Layer abort requested I/O has been " 5621 "flushed by LLD.\n"); 5622 ret = FAILED; 5623 goto out_unlock_hba; 5624 } 5625 5626 if (!lpfc_cmd->pCmd) { 5627 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5628 "2873 SCSI Layer I/O Abort Request IO CMPL Status " 5629 "x%x ID %d LUN %llu\n", 5630 SUCCESS, cmnd->device->id, cmnd->device->lun); 5631 goto out_unlock_hba; 5632 } 5633 5634 iocb = &lpfc_cmd->cur_iocbq; 5635 if (phba->sli_rev == LPFC_SLI_REV4) { 5636 pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring; 5637 if (!pring_s4) { 5638 ret = FAILED; 5639 goto out_unlock_hba; 5640 } 5641 spin_lock(&pring_s4->ring_lock); 5642 } 5643 /* the command is in process of being cancelled */ 5644 if (!(iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ)) { 5645 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5646 "3169 SCSI Layer abort requested I/O has been " 5647 "cancelled by LLD.\n"); 5648 ret = FAILED; 5649 goto out_unlock_ring; 5650 } 5651 /* 5652 * If pCmd field of the corresponding lpfc_io_buf structure 5653 * points to a different SCSI command, then the driver has 5654 * already completed this command, but the midlayer did not 5655 * see the completion before the eh fired. Just return SUCCESS. 
5656 */ 5657 if (lpfc_cmd->pCmd != cmnd) { 5658 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5659 "3170 SCSI Layer abort requested I/O has been " 5660 "completed by LLD.\n"); 5661 goto out_unlock_ring; 5662 } 5663 5664 WARN_ON(iocb->io_buf != lpfc_cmd); 5665 5666 /* abort issued in recovery is still in progress */ 5667 if (iocb->cmd_flag & LPFC_DRIVER_ABORTED) { 5668 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5669 "3389 SCSI Layer I/O Abort Request is pending\n"); 5670 if (phba->sli_rev == LPFC_SLI_REV4) 5671 spin_unlock(&pring_s4->ring_lock); 5672 spin_unlock(&phba->hbalock); 5673 spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags); 5674 goto wait_for_cmpl; 5675 } 5676 5677 lpfc_cmd->waitq = &waitq; 5678 if (phba->sli_rev == LPFC_SLI_REV4) { 5679 spin_unlock(&pring_s4->ring_lock); 5680 ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb, 5681 lpfc_sli_abort_fcp_cmpl); 5682 } else { 5683 pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; 5684 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb, 5685 lpfc_sli_abort_fcp_cmpl); 5686 } 5687 5688 /* Make sure HBA is alive */ 5689 lpfc_issue_hb_tmo(phba); 5690 5691 if (ret_val != IOCB_SUCCESS) { 5692 /* Indicate the IO is not being aborted by the driver. */ 5693 lpfc_cmd->waitq = NULL; 5694 ret = FAILED; 5695 goto out_unlock_hba; 5696 } 5697 5698 /* no longer need the lock after this point */ 5699 spin_unlock(&phba->hbalock); 5700 spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags); 5701 5702 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 5703 lpfc_sli_handle_fast_ring_event(phba, 5704 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); 5705 5706 wait_for_cmpl: 5707 /* 5708 * cmd_flag is set to LPFC_DRIVER_ABORTED before we wait 5709 * for abort to complete. 5710 */ 5711 wait_event_timeout(waitq, 5712 (lpfc_cmd->pCmd != cmnd), 5713 msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000)); 5714 5715 spin_lock(&lpfc_cmd->buf_lock); 5716 5717 if (lpfc_cmd->pCmd == cmnd) { 5718 ret = FAILED; 5719 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5720 "0748 abort handler timed out waiting " 5721 "for aborting I/O (xri:x%x) to complete: " 5722 "ret %#x, ID %d, LUN %llu\n", 5723 iocb->sli4_xritag, ret, 5724 cmnd->device->id, cmnd->device->lun); 5725 } 5726 5727 lpfc_cmd->waitq = NULL; 5728 5729 spin_unlock(&lpfc_cmd->buf_lock); 5730 goto out; 5731 5732 out_unlock_ring: 5733 if (phba->sli_rev == LPFC_SLI_REV4) 5734 spin_unlock(&pring_s4->ring_lock); 5735 out_unlock_hba: 5736 spin_unlock(&phba->hbalock); 5737 spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags); 5738 out: 5739 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5740 "0749 SCSI Layer I/O Abort Request Status x%x ID %d " 5741 "LUN %llu\n", ret, cmnd->device->id, 5742 cmnd->device->lun); 5743 return ret; 5744 } 5745 5746 static char * 5747 lpfc_taskmgmt_name(uint8_t task_mgmt_cmd) 5748 { 5749 switch (task_mgmt_cmd) { 5750 case FCP_ABORT_TASK_SET: 5751 return "ABORT_TASK_SET"; 5752 case FCP_CLEAR_TASK_SET: 5753 return "FCP_CLEAR_TASK_SET"; 5754 case FCP_BUS_RESET: 5755 return "FCP_BUS_RESET"; 5756 case FCP_LUN_RESET: 5757 return "FCP_LUN_RESET"; 5758 case FCP_TARGET_RESET: 5759 return "FCP_TARGET_RESET"; 5760 case FCP_CLEAR_ACA: 5761 return "FCP_CLEAR_ACA"; 5762 case FCP_TERMINATE_TASK: 5763 return "FCP_TERMINATE_TASK"; 5764 default: 5765 return "unknown"; 5766 } 5767 } 5768 5769 5770 /** 5771 * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed 5772 * @vport: The virtual port for which this call is being executed. 5773 * @lpfc_cmd: Pointer to lpfc_io_buf data structure. 
5774 *
5775 * This routine checks the FCP RSP INFO to see if the task management command succeeded.
5776 *
5777 * Return code :
5778 * 0x2003 - Error
5779 * 0x2002 - Success
5780 **/
5781 static int
5782 lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
5783 {
5784 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
5785 uint32_t rsp_info;
5786 uint32_t rsp_len;
5787 uint8_t rsp_info_code;
5788 int ret = FAILED;
5789
5790
5791 if (fcprsp == NULL)
5792 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5793 "0703 fcp_rsp is missing\n");
5794 else {
5795 rsp_info = fcprsp->rspStatus2;
5796 rsp_len = be32_to_cpu(fcprsp->rspRspLen);
5797 rsp_info_code = fcprsp->rspInfo3;
5798
5799
5800 lpfc_printf_vlog(vport, KERN_INFO,
5801 LOG_FCP,
5802 "0706 fcp_rsp valid 0x%x,"
5803 " rsp len=%d code 0x%x\n",
5804 rsp_info,
5805 rsp_len, rsp_info_code);
5806
5807 /* If FCP_RSP_LEN_VALID bit is one, then the FCP_RSP_LEN
5808 * field specifies the number of valid bytes of FCP_RSP_INFO.
5809 * The FCP_RSP_LEN field shall be set to 0x04 or 0x08
5810 */
5811 if ((fcprsp->rspStatus2 & RSP_LEN_VALID) &&
5812 ((rsp_len == 8) || (rsp_len == 4))) {
5813 switch (rsp_info_code) {
5814 case RSP_NO_FAILURE:
5815 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5816 "0715 Task Mgmt No Failure\n");
5817 ret = SUCCESS;
5818 break;
5819 case RSP_TM_NOT_SUPPORTED: /* TM rejected */
5820 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5821 "0716 Task Mgmt Target "
5822 "reject\n");
5823 break;
5824 case RSP_TM_NOT_COMPLETED: /* TM failed */
5825 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5826 "0717 Task Mgmt Target "
5827 "failed TM\n");
5828 break;
5829 case RSP_TM_INVALID_LU: /* TM to invalid LU! */
5830 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5831 "0718 Task Mgmt to invalid "
5832 "LUN\n");
5833 break;
5834 }
5835 }
5836 }
5837 return ret;
5838 }
5839
5840
5841 /**
5842 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
5843 * @vport: The virtual port for which this call is being executed.
5844 * @rport: Pointer to remote port
5845 * @tgt_id: Target ID of remote device.
5846 * @lun_id: Lun number for the TMF
5847 * @task_mgmt_cmd: type of TMF to send
5848 *
5849 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
5850 * a remote port.
5851 *
5852 * Return Code:
5853 * 0x2003 - Error
5854 * 0x2002 - Success.
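* The TMF is issued synchronously through lpfc_sli_issue_iocb_wait() with a
* timeout of phba->cfg_task_mgmt_tmo. On IOCB_TIMEDOUT or IOCB_ABORTED the
* routine returns TIMEOUT_ERROR, and on IOCB_TIMEDOUT the command buffer is
* intentionally left for the deferred completion rather than released here.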
5855 **/ 5856 static int 5857 lpfc_send_taskmgmt(struct lpfc_vport *vport, struct fc_rport *rport, 5858 unsigned int tgt_id, uint64_t lun_id, 5859 uint8_t task_mgmt_cmd) 5860 { 5861 struct lpfc_hba *phba = vport->phba; 5862 struct lpfc_io_buf *lpfc_cmd; 5863 struct lpfc_iocbq *iocbq; 5864 struct lpfc_iocbq *iocbqrsp; 5865 struct lpfc_rport_data *rdata; 5866 struct lpfc_nodelist *pnode; 5867 int ret; 5868 int status; 5869 5870 rdata = rport->dd_data; 5871 if (!rdata || !rdata->pnode) 5872 return FAILED; 5873 pnode = rdata->pnode; 5874 5875 lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode, NULL); 5876 if (lpfc_cmd == NULL) 5877 return FAILED; 5878 lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo; 5879 lpfc_cmd->rdata = rdata; 5880 lpfc_cmd->pCmd = NULL; 5881 lpfc_cmd->ndlp = pnode; 5882 5883 status = phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id, 5884 task_mgmt_cmd); 5885 if (!status) { 5886 lpfc_release_scsi_buf(phba, lpfc_cmd); 5887 return FAILED; 5888 } 5889 5890 iocbq = &lpfc_cmd->cur_iocbq; 5891 iocbqrsp = lpfc_sli_get_iocbq(phba); 5892 if (iocbqrsp == NULL) { 5893 lpfc_release_scsi_buf(phba, lpfc_cmd); 5894 return FAILED; 5895 } 5896 iocbq->cmd_cmpl = lpfc_tskmgmt_def_cmpl; 5897 iocbq->vport = vport; 5898 5899 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5900 "0702 Issue %s to TGT %d LUN %llu " 5901 "rpi x%x nlp_flag x%x Data: x%x x%x\n", 5902 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id, 5903 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag, 5904 iocbq->cmd_flag); 5905 5906 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, 5907 iocbq, iocbqrsp, lpfc_cmd->timeout); 5908 if ((status != IOCB_SUCCESS) || 5909 (get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_SUCCESS)) { 5910 if (status != IOCB_SUCCESS || 5911 get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_FCP_RSP_ERROR) 5912 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5913 "0727 TMF %s to TGT %d LUN %llu " 5914 "failed (%d, %d) cmd_flag x%x\n", 5915 lpfc_taskmgmt_name(task_mgmt_cmd), 5916 tgt_id, lun_id, 5917 get_job_ulpstatus(phba, iocbqrsp), 5918 get_job_word4(phba, iocbqrsp), 5919 iocbq->cmd_flag); 5920 /* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */ 5921 if (status == IOCB_SUCCESS) { 5922 if (get_job_ulpstatus(phba, iocbqrsp) == 5923 IOSTAT_FCP_RSP_ERROR) 5924 /* Something in the FCP_RSP was invalid. 5925 * Check conditions */ 5926 ret = lpfc_check_fcp_rsp(vport, lpfc_cmd); 5927 else 5928 ret = FAILED; 5929 } else if ((status == IOCB_TIMEDOUT) || 5930 (status == IOCB_ABORTED)) { 5931 ret = TIMEOUT_ERROR; 5932 } else { 5933 ret = FAILED; 5934 } 5935 } else 5936 ret = SUCCESS; 5937 5938 lpfc_sli_release_iocbq(phba, iocbqrsp); 5939 5940 if (status != IOCB_TIMEDOUT) 5941 lpfc_release_scsi_buf(phba, lpfc_cmd); 5942 5943 return ret; 5944 } 5945 5946 /** 5947 * lpfc_chk_tgt_mapped - 5948 * @vport: The virtual port to check on 5949 * @rport: Pointer to fc_rport data structure. 5950 * 5951 * This routine delays until the scsi target (aka rport) for the 5952 * command exists (is present and logged in) or we declare it non-existent. 
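* The rport's node state is polled every 500 milliseconds, for at most
* twice the devloss timeout, waiting for it to reach NLP_STE_MAPPED_NODE.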
5953 *
5954 * Return code :
5955 * 0x2003 - Error
5956 * 0x2002 - Success
5957 **/
5958 static int
5959 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct fc_rport *rport)
5960 {
5961 struct lpfc_rport_data *rdata;
5962 struct lpfc_nodelist *pnode = NULL;
5963 unsigned long later;
5964
5965 rdata = rport->dd_data;
5966 if (!rdata) {
5967 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5968 "0797 Tgt Map rport failure: rdata x%px\n", rdata);
5969 return FAILED;
5970 }
5971 pnode = rdata->pnode;
5972
5973 /*
5974 * If target is not in a MAPPED state, delay until
5975 * target is rediscovered or devloss timeout expires.
5976 */
5977 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5978 while (time_after(later, jiffies)) {
5979 if (!pnode)
5980 return FAILED;
5981 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
5982 return SUCCESS;
5983 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
5984 rdata = rport->dd_data;
5985 if (!rdata)
5986 return FAILED;
5987 pnode = rdata->pnode;
5988 }
5989 if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
5990 return FAILED;
5991 return SUCCESS;
5992 }
5993
5994 /**
5995 * lpfc_reset_flush_io_context - Flush outstanding I/O after a reset TMF
5996 * @vport: The virtual port (scsi_host) for the flush context
5997 * @tgt_id: If aborting by Target context - specifies the target id
5998 * @lun_id: If aborting by Lun context - specifies the lun id
5999 * @context: specifies the context level to flush at.
6000 *
6001 * After a reset condition via TMF, we need to flush orphaned i/o
6002 * contexts from the adapter. This routine aborts any contexts
6003 * outstanding, then waits for their completions. The wait is
6004 * bounded by devloss_tmo.
6005 *
6006 * Return code :
6007 * 0x2003 - Error
6008 * 0x2002 - Success
6009 **/
6010 static int
6011 lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
6012 uint64_t lun_id, lpfc_ctx_cmd context)
6013 {
6014 struct lpfc_hba *phba = vport->phba;
6015 unsigned long later;
6016 int cnt;
6017
6018 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
6019 if (cnt)
6020 lpfc_sli_abort_taskmgmt(vport,
6021 &phba->sli.sli3_ring[LPFC_FCP_RING],
6022 tgt_id, lun_id, context);
6023 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
6024 while (time_after(later, jiffies) && cnt) {
6025 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
6026 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
6027 }
6028 if (cnt) {
6029 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6030 "0724 I/O flush failure for context %s : cnt x%x\n",
6031 ((context == LPFC_CTX_LUN) ? "LUN" :
6032 ((context == LPFC_CTX_TGT) ? "TGT" :
6033 ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
6034 cnt);
6035 return FAILED;
6036 }
6037 return SUCCESS;
6038 }
6039
6040 /**
6041 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
6042 * @cmnd: Pointer to scsi_cmnd data structure.
6043 *
6044 * This routine does a device reset by sending a LUN_RESET task management
6045 * command.
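* On a successful LUN_RESET, any I/O still outstanding against the LUN is
* aborted and drained via lpfc_reset_flush_io_context() before the result
* is returned to the midlayer.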
6046 * 6047 * Return code : 6048 * 0x2003 - Error 6049 * 0x2002 - Success 6050 **/ 6051 static int 6052 lpfc_device_reset_handler(struct scsi_cmnd *cmnd) 6053 { 6054 struct Scsi_Host *shost = cmnd->device->host; 6055 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 6056 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6057 struct lpfc_rport_data *rdata; 6058 struct lpfc_nodelist *pnode; 6059 unsigned tgt_id = cmnd->device->id; 6060 uint64_t lun_id = cmnd->device->lun; 6061 struct lpfc_scsi_event_header scsi_event; 6062 int status; 6063 u32 logit = LOG_FCP; 6064 6065 if (!rport) 6066 return FAILED; 6067 6068 rdata = rport->dd_data; 6069 if (!rdata || !rdata->pnode) { 6070 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6071 "0798 Device Reset rdata failure: rdata x%px\n", 6072 rdata); 6073 return FAILED; 6074 } 6075 pnode = rdata->pnode; 6076 status = fc_block_rport(rport); 6077 if (status != 0 && status != SUCCESS) 6078 return status; 6079 6080 status = lpfc_chk_tgt_mapped(vport, rport); 6081 if (status == FAILED) { 6082 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6083 "0721 Device Reset rport failure: rdata x%px\n", rdata); 6084 return FAILED; 6085 } 6086 6087 scsi_event.event_type = FC_REG_SCSI_EVENT; 6088 scsi_event.subcategory = LPFC_EVENT_LUNRESET; 6089 scsi_event.lun = lun_id; 6090 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); 6091 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); 6092 6093 fc_host_post_vendor_event(shost, fc_get_event_number(), 6094 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 6095 6096 status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id, 6097 FCP_LUN_RESET); 6098 if (status != SUCCESS) 6099 logit = LOG_TRACE_EVENT; 6100 6101 lpfc_printf_vlog(vport, KERN_ERR, logit, 6102 "0713 SCSI layer issued Device Reset (%d, %llu) " 6103 "return x%x\n", tgt_id, lun_id, status); 6104 6105 /* 6106 * We have to clean up i/o as : they may be orphaned by the TMF; 6107 * or if the TMF failed, they may be in an indeterminate state. 6108 * So, continue on. 6109 * We will report success if all the i/o aborts successfully. 6110 */ 6111 if (status == SUCCESS) 6112 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, 6113 LPFC_CTX_LUN); 6114 6115 return status; 6116 } 6117 6118 /** 6119 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point 6120 * @cmnd: Pointer to scsi_cmnd data structure. 6121 * 6122 * This routine does a target reset by sending a TARGET_RESET task management 6123 * command. 
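* If the TMF fails, the driver additionally tries to recover the node by
* issuing a LOGO (when one is not already outstanding) and waits up to the
* devloss timeout for it to complete; on success, I/O still outstanding for
* the target is then flushed via lpfc_reset_flush_io_context().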
6124 * 6125 * Return code : 6126 * 0x2003 - Error 6127 * 0x2002 - Success 6128 **/ 6129 static int 6130 lpfc_target_reset_handler(struct scsi_cmnd *cmnd) 6131 { 6132 struct Scsi_Host *shost = cmnd->device->host; 6133 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 6134 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6135 struct lpfc_rport_data *rdata; 6136 struct lpfc_nodelist *pnode; 6137 unsigned tgt_id = cmnd->device->id; 6138 uint64_t lun_id = cmnd->device->lun; 6139 struct lpfc_scsi_event_header scsi_event; 6140 int status; 6141 u32 logit = LOG_FCP; 6142 u32 dev_loss_tmo = vport->cfg_devloss_tmo; 6143 unsigned long flags; 6144 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); 6145 6146 if (!rport) 6147 return FAILED; 6148 6149 rdata = rport->dd_data; 6150 if (!rdata || !rdata->pnode) { 6151 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6152 "0799 Target Reset rdata failure: rdata x%px\n", 6153 rdata); 6154 return FAILED; 6155 } 6156 pnode = rdata->pnode; 6157 status = fc_block_rport(rport); 6158 if (status != 0 && status != SUCCESS) 6159 return status; 6160 6161 status = lpfc_chk_tgt_mapped(vport, rport); 6162 if (status == FAILED) { 6163 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6164 "0722 Target Reset rport failure: rdata x%px\n", rdata); 6165 if (pnode) { 6166 spin_lock_irqsave(&pnode->lock, flags); 6167 pnode->nlp_flag &= ~NLP_NPR_ADISC; 6168 pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 6169 spin_unlock_irqrestore(&pnode->lock, flags); 6170 } 6171 lpfc_reset_flush_io_context(vport, tgt_id, lun_id, 6172 LPFC_CTX_TGT); 6173 return FAST_IO_FAIL; 6174 } 6175 6176 scsi_event.event_type = FC_REG_SCSI_EVENT; 6177 scsi_event.subcategory = LPFC_EVENT_TGTRESET; 6178 scsi_event.lun = 0; 6179 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); 6180 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); 6181 6182 fc_host_post_vendor_event(shost, fc_get_event_number(), 6183 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 6184 6185 status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id, 6186 FCP_TARGET_RESET); 6187 if (status != SUCCESS) { 6188 logit = LOG_TRACE_EVENT; 6189 6190 /* Issue LOGO, if no LOGO is outstanding */ 6191 spin_lock_irqsave(&pnode->lock, flags); 6192 if (!(pnode->save_flags & NLP_WAIT_FOR_LOGO) && 6193 !pnode->logo_waitq) { 6194 pnode->logo_waitq = &waitq; 6195 pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 6196 pnode->nlp_flag |= NLP_ISSUE_LOGO; 6197 pnode->save_flags |= NLP_WAIT_FOR_LOGO; 6198 spin_unlock_irqrestore(&pnode->lock, flags); 6199 lpfc_unreg_rpi(vport, pnode); 6200 wait_event_timeout(waitq, 6201 (!(pnode->save_flags & 6202 NLP_WAIT_FOR_LOGO)), 6203 msecs_to_jiffies(dev_loss_tmo * 6204 1000)); 6205 6206 if (pnode->save_flags & NLP_WAIT_FOR_LOGO) { 6207 lpfc_printf_vlog(vport, KERN_ERR, logit, 6208 "0725 SCSI layer TGTRST " 6209 "failed & LOGO TMO (%d, %llu) " 6210 "return x%x\n", 6211 tgt_id, lun_id, status); 6212 spin_lock_irqsave(&pnode->lock, flags); 6213 pnode->save_flags &= ~NLP_WAIT_FOR_LOGO; 6214 } else { 6215 spin_lock_irqsave(&pnode->lock, flags); 6216 } 6217 pnode->logo_waitq = NULL; 6218 spin_unlock_irqrestore(&pnode->lock, flags); 6219 status = SUCCESS; 6220 6221 } else { 6222 spin_unlock_irqrestore(&pnode->lock, flags); 6223 status = FAILED; 6224 } 6225 } 6226 6227 lpfc_printf_vlog(vport, KERN_ERR, logit, 6228 "0723 SCSI layer issued Target Reset (%d, %llu) " 6229 "return x%x\n", tgt_id, lun_id, status); 6230 6231 /* 6232 * We have to clean up i/o as : they may 
be orphaned by the TMF; 6233 * or if the TMF failed, they may be in an indeterminate state. 6234 * So, continue on. 6235 * We will report success if all the i/o aborts successfully. 6236 */ 6237 if (status == SUCCESS) 6238 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, 6239 LPFC_CTX_TGT); 6240 return status; 6241 } 6242 6243 /** 6244 * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt 6245 * @cmnd: Pointer to scsi_cmnd data structure. 6246 * 6247 * This routine does host reset to the adaptor port. It brings the HBA 6248 * offline, performs a board restart, and then brings the board back online. 6249 * The lpfc_offline calls lpfc_sli_hba_down which will abort and local 6250 * reject all outstanding SCSI commands to the host and error returned 6251 * back to SCSI mid-level. As this will be SCSI mid-level's last resort 6252 * of error handling, it will only return error if resetting of the adapter 6253 * is not successful; in all other cases, will return success. 6254 * 6255 * Return code : 6256 * 0x2003 - Error 6257 * 0x2002 - Success 6258 **/ 6259 static int 6260 lpfc_host_reset_handler(struct scsi_cmnd *cmnd) 6261 { 6262 struct Scsi_Host *shost = cmnd->device->host; 6263 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6264 struct lpfc_hba *phba = vport->phba; 6265 int rc, ret = SUCCESS; 6266 6267 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 6268 "3172 SCSI layer issued Host Reset Data:\n"); 6269 6270 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 6271 lpfc_offline(phba); 6272 rc = lpfc_sli_brdrestart(phba); 6273 if (rc) 6274 goto error; 6275 6276 /* Wait for successful restart of adapter */ 6277 if (phba->sli_rev < LPFC_SLI_REV4) { 6278 rc = lpfc_sli_chipset_init(phba); 6279 if (rc) 6280 goto error; 6281 } 6282 6283 rc = lpfc_online(phba); 6284 if (rc) 6285 goto error; 6286 6287 lpfc_unblock_mgmt_io(phba); 6288 6289 return ret; 6290 error: 6291 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6292 "3323 Failed host reset\n"); 6293 lpfc_unblock_mgmt_io(phba); 6294 return FAILED; 6295 } 6296 6297 /** 6298 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point 6299 * @sdev: Pointer to scsi_device. 6300 * 6301 * This routine populates the cmds_per_lun count + 2 scsi_bufs into this host's 6302 * globally available list of scsi buffers. This routine also makes sure scsi 6303 * buffer is not allocated more than HBA limit conveyed to midlayer. This list 6304 * of scsi buffer exists for the lifetime of the driver. 6305 * 6306 * Return codes: 6307 * non-0 - Error 6308 * 0 - Success 6309 **/ 6310 static int 6311 lpfc_slave_alloc(struct scsi_device *sdev) 6312 { 6313 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 6314 struct lpfc_hba *phba = vport->phba; 6315 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 6316 uint32_t total = 0; 6317 uint32_t num_to_alloc = 0; 6318 int num_allocated = 0; 6319 uint32_t sdev_cnt; 6320 struct lpfc_device_data *device_data; 6321 unsigned long flags; 6322 struct lpfc_name target_wwpn; 6323 6324 if (!rport || fc_remote_port_chkready(rport)) 6325 return -ENXIO; 6326 6327 if (phba->cfg_fof) { 6328 6329 /* 6330 * Check to see if the device data structure for the lun 6331 * exists. If not, create one. 
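* The phba->luns list is searched under phba->devicelock; if no entry is
* found the lock is dropped for the allocation and re-taken before the new
* entry is added to the list.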
6332 */ 6333 6334 u64_to_wwn(rport->port_name, target_wwpn.u.wwn); 6335 spin_lock_irqsave(&phba->devicelock, flags); 6336 device_data = __lpfc_get_device_data(phba, 6337 &phba->luns, 6338 &vport->fc_portname, 6339 &target_wwpn, 6340 sdev->lun); 6341 if (!device_data) { 6342 spin_unlock_irqrestore(&phba->devicelock, flags); 6343 device_data = lpfc_create_device_data(phba, 6344 &vport->fc_portname, 6345 &target_wwpn, 6346 sdev->lun, 6347 phba->cfg_XLanePriority, 6348 true); 6349 if (!device_data) 6350 return -ENOMEM; 6351 spin_lock_irqsave(&phba->devicelock, flags); 6352 list_add_tail(&device_data->listentry, &phba->luns); 6353 } 6354 device_data->rport_data = rport->dd_data; 6355 device_data->available = true; 6356 spin_unlock_irqrestore(&phba->devicelock, flags); 6357 sdev->hostdata = device_data; 6358 } else { 6359 sdev->hostdata = rport->dd_data; 6360 } 6361 sdev_cnt = atomic_inc_return(&phba->sdev_cnt); 6362 6363 /* For SLI4, all IO buffers are pre-allocated */ 6364 if (phba->sli_rev == LPFC_SLI_REV4) 6365 return 0; 6366 6367 /* This code path is now ONLY for SLI3 adapters */ 6368 6369 /* 6370 * Populate the cmds_per_lun count scsi_bufs into this host's globally 6371 * available list of scsi buffers. Don't allocate more than the 6372 * HBA limit conveyed to the midlayer via the host structure. The 6373 * formula accounts for the lun_queue_depth + error handlers + 1 6374 * extra. This list of scsi bufs exists for the lifetime of the driver. 6375 */ 6376 total = phba->total_scsi_bufs; 6377 num_to_alloc = vport->cfg_lun_queue_depth + 2; 6378 6379 /* If allocated buffers are enough do nothing */ 6380 if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total) 6381 return 0; 6382 6383 /* Allow some exchanges to be available always to complete discovery */ 6384 if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) { 6385 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 6386 "0704 At limitation of %d preallocated " 6387 "command buffers\n", total); 6388 return 0; 6389 /* Allow some exchanges to be available always to complete discovery */ 6390 } else if (total + num_to_alloc > 6391 phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) { 6392 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 6393 "0705 Allocation request of %d " 6394 "command buffers will exceed max of %d. " 6395 "Reducing allocation request to %d.\n", 6396 num_to_alloc, phba->cfg_hba_queue_depth, 6397 (phba->cfg_hba_queue_depth - total)); 6398 num_to_alloc = phba->cfg_hba_queue_depth - total; 6399 } 6400 num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc); 6401 if (num_to_alloc != num_allocated) { 6402 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6403 "0708 Allocation request of %d " 6404 "command buffers did not succeed. " 6405 "Allocated %d buffers.\n", 6406 num_to_alloc, num_allocated); 6407 } 6408 if (num_allocated > 0) 6409 phba->total_scsi_bufs += num_allocated; 6410 return 0; 6411 } 6412 6413 /** 6414 * lpfc_slave_configure - scsi_host_template slave_configure entry point 6415 * @sdev: Pointer to scsi_device. 6416 * 6417 * This routine configures following items 6418 * - Tag command queuing support for @sdev if supported. 6419 * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set. 
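* - Set the LUN queue depth to vport->cfg_lun_queue_depth via
*   scsi_change_queue_depth().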
6420 *
6421 * Return codes:
6422 * 0 - Success
6423 **/
6424 static int
6425 lpfc_slave_configure(struct scsi_device *sdev)
6426 {
6427 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
6428 struct lpfc_hba *phba = vport->phba;
6429
6430 scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);
6431
6432 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
6433 lpfc_sli_handle_fast_ring_event(phba,
6434 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
6435 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
6436 lpfc_poll_rearm_timer(phba);
6437 }
6438
6439 return 0;
6440 }
6441
6442 /**
6443 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
6444 * @sdev: Pointer to scsi_device.
6445 *
6446 * This routine sets the @sdev hostdata field to null.
6447 **/
6448 static void
6449 lpfc_slave_destroy(struct scsi_device *sdev)
6450 {
6451 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
6452 struct lpfc_hba *phba = vport->phba;
6453 unsigned long flags;
6454 struct lpfc_device_data *device_data = sdev->hostdata;
6455
6456 atomic_dec(&phba->sdev_cnt);
6457 if ((phba->cfg_fof) && (device_data)) {
6458 spin_lock_irqsave(&phba->devicelock, flags);
6459 device_data->available = false;
6460 if (!device_data->oas_enabled)
6461 lpfc_delete_device_data(phba, device_data);
6462 spin_unlock_irqrestore(&phba->devicelock, flags);
6463 }
6464 sdev->hostdata = NULL;
6465 return;
6466 }
6467
6468 /**
6469 * lpfc_create_device_data - creates and initializes device data structure for OAS
6470 * @phba: Pointer to host bus adapter structure.
6471 * @vport_wwpn: Pointer to vport's wwpn information
6472 * @target_wwpn: Pointer to target's wwpn information
6473 * @lun: Lun on target
6474 * @pri: Priority
6475 * @atomic_create: Flag to indicate if memory should be allocated using the
6476 * GFP_ATOMIC flag or not.
6477 *
6478 * This routine creates a device data structure which will contain identifying
6479 * information for the device (host wwpn, target wwpn, lun), state of OAS,
6480 * whether or not the corresponding lun is available by the system,
6481 * and pointer to the rport data.
6482 *
6483 * Return codes:
6484 * NULL - Error
6485 * Pointer to lpfc_device_data - Success
6486 **/
6487 struct lpfc_device_data*
6488 lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6489 struct lpfc_name *target_wwpn, uint64_t lun,
6490 uint32_t pri, bool atomic_create)
6491 {
6492
6493 struct lpfc_device_data *lun_info;
6494 int memory_flags;
6495
6496 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6497 !(phba->cfg_fof))
6498 return NULL;
6499
6500 /* Attempt to create the device data to contain lun info */
6501
6502 if (atomic_create)
6503 memory_flags = GFP_ATOMIC;
6504 else
6505 memory_flags = GFP_KERNEL;
6506 lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
6507 if (!lun_info)
6508 return NULL;
6509 INIT_LIST_HEAD(&lun_info->listentry);
6510 lun_info->rport_data = NULL;
6511 memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
6512 sizeof(struct lpfc_name));
6513 memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
6514 sizeof(struct lpfc_name));
6515 lun_info->device_id.lun = lun;
6516 lun_info->oas_enabled = false;
6517 lun_info->priority = pri;
6518 lun_info->available = false;
6519 return lun_info;
6520 }
6521
6522 /**
6523 * lpfc_delete_device_data - frees a device data structure for OAS
6524 * @phba: Pointer to host bus adapter structure.
6525 * @lun_info: Pointer to device data structure to free.
6526 * 6527 * This routine frees the previously allocated device data structure passed. 6528 * 6529 **/ 6530 void 6531 lpfc_delete_device_data(struct lpfc_hba *phba, 6532 struct lpfc_device_data *lun_info) 6533 { 6534 6535 if (unlikely(!phba) || !lun_info || 6536 !(phba->cfg_fof)) 6537 return; 6538 6539 if (!list_empty(&lun_info->listentry)) 6540 list_del(&lun_info->listentry); 6541 mempool_free(lun_info, phba->device_data_mem_pool); 6542 return; 6543 } 6544 6545 /** 6546 * __lpfc_get_device_data - returns the device data for the specified lun 6547 * @phba: Pointer to host bus adapter structure. 6548 * @list: Point to list to search. 6549 * @vport_wwpn: Pointer to vport's wwpn information 6550 * @target_wwpn: Pointer to target's wwpn information 6551 * @lun: Lun on target 6552 * 6553 * This routine searches the list passed for the specified lun's device data. 6554 * This function does not hold locks, it is the responsibility of the caller 6555 * to ensure the proper lock is held before calling the function. 6556 * 6557 * Return codes: 6558 * NULL - Error 6559 * Pointer to lpfc_device_data - Success 6560 **/ 6561 struct lpfc_device_data* 6562 __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list, 6563 struct lpfc_name *vport_wwpn, 6564 struct lpfc_name *target_wwpn, uint64_t lun) 6565 { 6566 6567 struct lpfc_device_data *lun_info; 6568 6569 if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn || 6570 !phba->cfg_fof) 6571 return NULL; 6572 6573 /* Check to see if the lun is already enabled for OAS. */ 6574 6575 list_for_each_entry(lun_info, list, listentry) { 6576 if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn, 6577 sizeof(struct lpfc_name)) == 0) && 6578 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn, 6579 sizeof(struct lpfc_name)) == 0) && 6580 (lun_info->device_id.lun == lun)) 6581 return lun_info; 6582 } 6583 6584 return NULL; 6585 } 6586 6587 /** 6588 * lpfc_find_next_oas_lun - searches for the next oas lun 6589 * @phba: Pointer to host bus adapter structure. 6590 * @vport_wwpn: Pointer to vport's wwpn information 6591 * @target_wwpn: Pointer to target's wwpn information 6592 * @starting_lun: Pointer to the lun to start searching for 6593 * @found_vport_wwpn: Pointer to the found lun's vport wwpn information 6594 * @found_target_wwpn: Pointer to the found lun's target wwpn information 6595 * @found_lun: Pointer to the found lun. 6596 * @found_lun_status: Pointer to status of the found lun. 6597 * @found_lun_pri: Pointer to priority of the found lun. 6598 * 6599 * This routine searches the luns list for the specified lun 6600 * or the first lun for the vport/target. If the vport wwpn contains 6601 * a zero value then a specific vport is not specified. In this case 6602 * any vport which contains the lun will be considered a match. If the 6603 * target wwpn contains a zero value then a specific target is not specified. 6604 * In this case any target which contains the lun will be considered a 6605 * match. If the lun is found, the lun, vport wwpn, target wwpn and lun status 6606 * are returned. The function will also return the next lun if available. 6607 * If the next lun is not found, starting_lun parameter will be set to 6608 * NO_MORE_OAS_LUN. 
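* A minimal caller-side iteration sketch (illustrative only; the local
* variable names below are hypothetical, not taken from the driver):
*
*	uint64_t lun = FIND_FIRST_OAS_LUN, found_lun;
*	uint32_t status, pri;
*	struct lpfc_name fvport, ftarget;
*
*	while (lpfc_find_next_oas_lun(phba, &vport_wwpn, &target_wwpn, &lun,
*				      &fvport, &ftarget, &found_lun,
*				      &status, &pri)) {
*		process(found_lun);	(process() stands in for caller code)
*	}
*
* The loop terminates on its own because the routine returns false once
* @starting_lun has been set to NO_MORE_OAS_LUN.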
6609 *
6610 * Return codes:
6611 * true - A matching OAS lun was found
6612 * false - No matching OAS lun was found
6613 **/
6614 bool
6615 lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6616 struct lpfc_name *target_wwpn, uint64_t *starting_lun,
6617 struct lpfc_name *found_vport_wwpn,
6618 struct lpfc_name *found_target_wwpn,
6619 uint64_t *found_lun,
6620 uint32_t *found_lun_status,
6621 uint32_t *found_lun_pri)
6622 {
6623
6624 unsigned long flags;
6625 struct lpfc_device_data *lun_info;
6626 struct lpfc_device_id *device_id;
6627 uint64_t lun;
6628 bool found = false;
6629
6630 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6631 !starting_lun || !found_vport_wwpn ||
6632 !found_target_wwpn || !found_lun || !found_lun_status ||
6633 (*starting_lun == NO_MORE_OAS_LUN) ||
6634 !phba->cfg_fof)
6635 return false;
6636
6637 lun = *starting_lun;
6638 *found_lun = NO_MORE_OAS_LUN;
6639 *starting_lun = NO_MORE_OAS_LUN;
6640
6641 /* Search for the lun or the lun closest in value */
6642
6643 spin_lock_irqsave(&phba->devicelock, flags);
6644 list_for_each_entry(lun_info, &phba->luns, listentry) {
6645 if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
6646 (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
6647 sizeof(struct lpfc_name)) == 0)) &&
6648 ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
6649 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
6650 sizeof(struct lpfc_name)) == 0)) &&
6651 (lun_info->oas_enabled)) {
6652 device_id = &lun_info->device_id;
6653 if ((!found) &&
6654 ((lun == FIND_FIRST_OAS_LUN) ||
6655 (device_id->lun == lun))) {
6656 *found_lun = device_id->lun;
6657 memcpy(found_vport_wwpn,
6658 &device_id->vport_wwpn,
6659 sizeof(struct lpfc_name));
6660 memcpy(found_target_wwpn,
6661 &device_id->target_wwpn,
6662 sizeof(struct lpfc_name));
6663 if (lun_info->available)
6664 *found_lun_status =
6665 OAS_LUN_STATUS_EXISTS;
6666 else
6667 *found_lun_status = 0;
6668 *found_lun_pri = lun_info->priority;
6669 if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
6670 memset(vport_wwpn, 0x0,
6671 sizeof(struct lpfc_name));
6672 if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
6673 memset(target_wwpn, 0x0,
6674 sizeof(struct lpfc_name));
6675 found = true;
6676 } else if (found) {
6677 *starting_lun = device_id->lun;
6678 memcpy(vport_wwpn, &device_id->vport_wwpn,
6679 sizeof(struct lpfc_name));
6680 memcpy(target_wwpn, &device_id->target_wwpn,
6681 sizeof(struct lpfc_name));
6682 break;
6683 }
6684 }
6685 }
6686 spin_unlock_irqrestore(&phba->devicelock, flags);
6687 return found;
6688 }
6689
6690 /**
6691 * lpfc_enable_oas_lun - enables a lun for OAS operations
6692 * @phba: Pointer to host bus adapter structure.
6693 * @vport_wwpn: Pointer to vport's wwpn information
6694 * @target_wwpn: Pointer to target's wwpn information
6695 * @lun: Lun
6696 * @pri: Priority
6697 *
6698 * This routine enables a lun for oas operations. The routine does so by
6699 * doing the following :
6700 *
6701 * 1) Checks to see if the device data for the lun has been created.
6702 * 2) If found, sets the OAS enabled flag if not set and returns.
6703 * 3) Otherwise, creates a device data structure.
6704 * 4) If successfully created, indicates the device data is for an OAS lun,
6705 * indicates the lun is not available, and adds it to the list of luns.
6706 * 6707 * Return codes: 6708 * false - Error 6709 * true - Success 6710 **/ 6711 bool 6712 lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, 6713 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri) 6714 { 6715 6716 struct lpfc_device_data *lun_info; 6717 unsigned long flags; 6718 6719 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 6720 !phba->cfg_fof) 6721 return false; 6722 6723 spin_lock_irqsave(&phba->devicelock, flags); 6724 6725 /* Check to see if the device data for the lun has been created */ 6726 lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn, 6727 target_wwpn, lun); 6728 if (lun_info) { 6729 if (!lun_info->oas_enabled) 6730 lun_info->oas_enabled = true; 6731 lun_info->priority = pri; 6732 spin_unlock_irqrestore(&phba->devicelock, flags); 6733 return true; 6734 } 6735 6736 /* Create an lun info structure and add to list of luns */ 6737 lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun, 6738 pri, true); 6739 if (lun_info) { 6740 lun_info->oas_enabled = true; 6741 lun_info->priority = pri; 6742 lun_info->available = false; 6743 list_add_tail(&lun_info->listentry, &phba->luns); 6744 spin_unlock_irqrestore(&phba->devicelock, flags); 6745 return true; 6746 } 6747 spin_unlock_irqrestore(&phba->devicelock, flags); 6748 return false; 6749 } 6750 6751 /** 6752 * lpfc_disable_oas_lun - disables a lun for OAS operations 6753 * @phba: Pointer to host bus adapter structure. 6754 * @vport_wwpn: Pointer to vport's wwpn information 6755 * @target_wwpn: Pointer to target's wwpn information 6756 * @lun: Lun 6757 * @pri: Priority 6758 * 6759 * This routine disables a lun for oas operations. The routines does so by 6760 * doing the following : 6761 * 6762 * 1) Checks to see if the device data for the lun is created. 6763 * 2) If present, clears the flag indicating this lun is for OAS. 6764 * 3) If the lun is not available by the system, the device data is 6765 * freed. 6766 * 6767 * Return codes: 6768 * false - Error 6769 * true - Success 6770 **/ 6771 bool 6772 lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, 6773 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri) 6774 { 6775 6776 struct lpfc_device_data *lun_info; 6777 unsigned long flags; 6778 6779 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 6780 !phba->cfg_fof) 6781 return false; 6782 6783 spin_lock_irqsave(&phba->devicelock, flags); 6784 6785 /* Check to see if the lun is available. 
*/ 6786 lun_info = __lpfc_get_device_data(phba, 6787 &phba->luns, vport_wwpn, 6788 target_wwpn, lun); 6789 if (lun_info) { 6790 lun_info->oas_enabled = false; 6791 lun_info->priority = pri; 6792 if (!lun_info->available) 6793 lpfc_delete_device_data(phba, lun_info); 6794 spin_unlock_irqrestore(&phba->devicelock, flags); 6795 return true; 6796 } 6797 6798 spin_unlock_irqrestore(&phba->devicelock, flags); 6799 return false; 6800 } 6801 6802 static int 6803 lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) 6804 { 6805 return SCSI_MLQUEUE_HOST_BUSY; 6806 } 6807 6808 static int 6809 lpfc_no_slave(struct scsi_device *sdev) 6810 { 6811 return -ENODEV; 6812 } 6813 6814 struct scsi_host_template lpfc_template_nvme = { 6815 .module = THIS_MODULE, 6816 .name = LPFC_DRIVER_NAME, 6817 .proc_name = LPFC_DRIVER_NAME, 6818 .info = lpfc_info, 6819 .queuecommand = lpfc_no_command, 6820 .slave_alloc = lpfc_no_slave, 6821 .slave_configure = lpfc_no_slave, 6822 .scan_finished = lpfc_scan_finished, 6823 .this_id = -1, 6824 .sg_tablesize = 1, 6825 .cmd_per_lun = 1, 6826 .shost_groups = lpfc_hba_groups, 6827 .max_sectors = 0xFFFFFFFF, 6828 .vendor_id = LPFC_NL_VENDOR_ID, 6829 .track_queue_depth = 0, 6830 }; 6831 6832 struct scsi_host_template lpfc_template = { 6833 .module = THIS_MODULE, 6834 .name = LPFC_DRIVER_NAME, 6835 .proc_name = LPFC_DRIVER_NAME, 6836 .info = lpfc_info, 6837 .queuecommand = lpfc_queuecommand, 6838 .eh_timed_out = fc_eh_timed_out, 6839 .eh_should_retry_cmd = fc_eh_should_retry_cmd, 6840 .eh_abort_handler = lpfc_abort_handler, 6841 .eh_device_reset_handler = lpfc_device_reset_handler, 6842 .eh_target_reset_handler = lpfc_target_reset_handler, 6843 .eh_host_reset_handler = lpfc_host_reset_handler, 6844 .slave_alloc = lpfc_slave_alloc, 6845 .slave_configure = lpfc_slave_configure, 6846 .slave_destroy = lpfc_slave_destroy, 6847 .scan_finished = lpfc_scan_finished, 6848 .this_id = -1, 6849 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, 6850 .cmd_per_lun = LPFC_CMD_PER_LUN, 6851 .shost_groups = lpfc_hba_groups, 6852 .max_sectors = 0xFFFFFFFF, 6853 .vendor_id = LPFC_NL_VENDOR_ID, 6854 .change_queue_depth = scsi_change_queue_depth, 6855 .track_queue_depth = 1, 6856 }; 6857
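/*
 * Illustrative sketch (not part of the driver): a host template such as
 * lpfc_template is typically handed to the SCSI midlayer when a port is
 * created, roughly along these lines (error handling and lpfc-specific
 * setup omitted; pdev here stands for the adapter's PCI device, and the
 * real call sites live outside this file):
 *
 *	struct Scsi_Host *shost;
 *
 *	shost = scsi_host_alloc(&lpfc_template, sizeof(struct lpfc_vport));
 *	if (!shost)
 *		return NULL;
 *	if (scsi_add_host_with_dma(shost, &pdev->dev, &pdev->dev))
 *		scsi_host_put(shost);
 *
 * lpfc_template_nvme, by contrast, wires queuecommand and the slave hooks
 * to lpfc_no_command()/lpfc_no_slave(), so a SCSI scan on such a host finds
 * no devices.
 */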