/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/t10-pi.h>
#include <linux/crc-t10dif.h>
#include <linux/blk-cgroup.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

static char *dif_op_str[] = {
        "PROT_NORMAL",
        "PROT_READ_INSERT",
        "PROT_WRITE_STRIP",
        "PROT_READ_STRIP",
        "PROT_WRITE_INSERT",
        "PROT_READ_PASS",
        "PROT_WRITE_PASS",
};

struct scsi_dif_tuple {
        __be16 guard_tag;       /* Checksum */
        __be16 app_tag;         /* Opaque storage */
        __be32 ref_tag;         /* Target LBA or indirect LBA */
};
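/*
 * Note: each T10 DIF tuple above is 8 bytes (2-byte guard CRC/checksum,
 * 2-byte application tag, 4-byte reference tag). The protection-group
 * code later in this file relies on this when it converts protection
 * buffer lengths to block counts by dividing by 8.
 */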
static struct lpfc_rport_data *
lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
{
        struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;

        if (vport->phba->cfg_fof)
                return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
        else
                return (struct lpfc_rport_data *)sdev->hostdata;
}

static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
static void
lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
                           struct lpfc_vmid *vmp);
static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd
                                   *cmd, struct lpfc_vmid *vmp,
                                   union lpfc_vmid_io_tag *tag);
static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
                                    struct lpfc_vmid *vmid);

/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called from the lpfc_prep_task_mgmt_cmd function to
 * set the last bit in the response sge entry.
 **/
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
                           struct lpfc_io_buf *lpfc_cmd)
{
        struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;

        if (sgl) {
                sgl += 1;
                sgl->word2 = le32_to_cpu(sgl->word2);
                bf_set(lpfc_sli4_sge_last, sgl, 1);
                sgl->word2 = cpu_to_le32(sgl->word2);
        }
}

#define LPFC_INVALID_REFTAG ((u32)-1)

/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @vport: The virtual port on which this call is executing.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called when there is a command completion and this
 * function updates the statistical data for the command completion.
 **/
static void
lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_rport_data *rdata;
        struct lpfc_nodelist *pnode;
        struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
        unsigned long flags;
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        unsigned long latency;
        int i;

        if (!vport->stat_data_enabled ||
            vport->stat_data_blocked ||
            (cmd->result))
                return;

        latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
        rdata = lpfc_cmd->rdata;
        pnode = rdata->pnode;

        spin_lock_irqsave(shost->host_lock, flags);
        if (!pnode ||
            !pnode->lat_data ||
            (phba->bucket_type == LPFC_NO_BUCKET)) {
                spin_unlock_irqrestore(shost->host_lock, flags);
                return;
        }

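        /*
         * Map the measured latency into a histogram bucket. For the linear
         * scheme the index is (latency + step - 1 - base) / step; e.g. with
         * base 0 and a 50 ms step, a 120 ms completion lands in bucket
         * (120 + 49) / 50 = 3. The exponential scheme picks the first bucket
         * whose upper bound (base + 2^i * step) covers the latency.
         */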
        if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
                i = (latency + phba->bucket_step - 1 - phba->bucket_base) /
                        phba->bucket_step;
                /* check array subscript bounds */
                if (i < 0)
                        i = 0;
                else if (i >= LPFC_MAX_BUCKET_COUNT)
                        i = LPFC_MAX_BUCKET_COUNT - 1;
        } else {
                for (i = 0; i < LPFC_MAX_BUCKET_COUNT - 1; i++)
                        if (latency <= (phba->bucket_base +
                                ((1 << i) * phba->bucket_step)))
                                break;
        }

        pnode->lat_data[i].cmd_count++;
        spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is a resource error in the driver or
 * firmware. It posts a WORKER_RAMP_DOWN_QUEUE event for @phba, at most one
 * event per second, and wakes up the worker thread of @phba to process it.
 *
 * This routine should be called with no lock held.
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
        unsigned long flags;
        uint32_t evt_posted;
        unsigned long expires;

        spin_lock_irqsave(&phba->hbalock, flags);
        atomic_inc(&phba->num_rsrc_err);
        phba->last_rsrc_error_time = jiffies;

        expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
        if (time_after(expires, jiffies)) {
                spin_unlock_irqrestore(&phba->hbalock, flags);
                return;
        }

        phba->last_ramp_down_time = jiffies;

        spin_unlock_irqrestore(&phba->hbalock, flags);

        spin_lock_irqsave(&phba->pport->work_port_lock, flags);
        evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
        if (!evt_posted)
                phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
        spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

        if (!evt_posted)
                lpfc_worker_wake_up(phba);
        return;
}

/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
 * worker thread. It reduces the queue depth for all scsi devices on each
 * vport associated with @phba.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
        struct lpfc_vport **vports;
        struct Scsi_Host *shost;
        struct scsi_device *sdev;
        unsigned long new_queue_depth;
        unsigned long num_rsrc_err, num_cmd_success;
        int i;

        num_rsrc_err = atomic_read(&phba->num_rsrc_err);
        num_cmd_success = atomic_read(&phba->num_cmd_success);

        /*
         * The error and success command counters are global per
         * driver instance.  If another handler has already
         * operated on this error event, just exit.
         */
        if (num_rsrc_err == 0)
                return;

        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        shost = lpfc_shost_from_vport(vports[i]);
                        shost_for_each_device(sdev, shost) {
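                                /*
                                 * Scale the depth down in proportion to the
                                 * error rate seen since the last ramp-down.
                                 * For example, 10 resource errors against 90
                                 * successes reduces a depth of 32 by
                                 * 32 * 10 / 100 = 3; if the scaled reduction
                                 * rounds down to zero, drop the depth by one
                                 * instead.
                                 */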
                                new_queue_depth =
                                        sdev->queue_depth * num_rsrc_err /
                                        (num_rsrc_err + num_cmd_success);
                                if (!new_queue_depth)
                                        new_queue_depth = sdev->queue_depth - 1;
                                else
                                        new_queue_depth = sdev->queue_depth -
                                                                new_queue_depth;
                                scsi_change_queue_depth(sdev, new_queue_depth);
                        }
                }
        lpfc_destroy_vport_work_array(phba, vports);
        atomic_set(&phba->num_rsrc_err, 0);
        atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to block state
 * by invoking the fc_remote_port_delete() routine. This function is invoked
 * by EEH when the device's PCI slot has been permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
        struct lpfc_vport **vports;
        struct Scsi_Host *shost;
        struct scsi_device *sdev;
        struct fc_rport *rport;
        int i;

        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        shost = lpfc_shost_from_vport(vports[i]);
                        shost_for_each_device(sdev, shost) {
                                rport = starget_to_rport(scsi_target(sdev));
                                fc_remote_port_delete(rport);
                        }
                }
        lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates scsi buffers for a device with the SLI-3 interface
 * spec. Each scsi buffer contains all the necessary information needed to
 * initiate a SCSI I/O. The non-DMAable buffer region contains information to
 * build the IOCB. The DMAable region contains memory for the FCP CMND, FCP
 * RSP, and the initial BPL. In addition to allocating memory, the FCP CMND
 * and FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_io_buf *psb;
        struct ulp_bde64 *bpl;
        IOCB_t *iocb;
        dma_addr_t pdma_phys_fcp_cmd;
        dma_addr_t pdma_phys_fcp_rsp;
        dma_addr_t pdma_phys_sgl;
        uint16_t iotag;
        int bcnt, bpl_size;

        bpl_size = phba->cfg_sg_dma_buf_size -
                (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

        lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
                         "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
                         num_to_alloc, phba->cfg_sg_dma_buf_size,
                         (int)sizeof(struct fcp_cmnd),
                         (int)sizeof(struct fcp_rsp), bpl_size);

        for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
                psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL);
                if (!psb)
                        break;

                /*
                 * Get memory from the pci pool to map the virt space to pci
                 * bus space for an I/O.  The DMA buffer includes space for the
                 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
                 * necessary to support the sg_tablesize.
                 */
                psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
                                            GFP_KERNEL, &psb->dma_handle);
                if (!psb->data) {
                        kfree(psb);
                        break;
                }

                /* Allocate iotag for psb->cur_iocbq. */
                iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
                if (iotag == 0) {
                        dma_pool_free(phba->lpfc_sg_dma_buf_pool,
                                      psb->data, psb->dma_handle);
                        kfree(psb);
                        break;
                }
                psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

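                /*
                 * Carve the single DMA allocation into its three regions:
                 * the FCP command, the FCP response, and the BPL that will
                 * hold the scatter-gather BDEs for the data.
                 */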
                psb->fcp_cmnd = psb->data;
                psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
                psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) +
                        sizeof(struct fcp_rsp);

                /* Initialize local short-hand pointers. */
                bpl = (struct ulp_bde64 *)psb->dma_sgl;
                pdma_phys_fcp_cmd = psb->dma_handle;
                pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
                pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) +
                        sizeof(struct fcp_rsp);

                /*
                 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
                 * are sg list bdes.  Initialize the first two and leave the
                 * rest for queuecommand.
                 */
                bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
                bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
                bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
                bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
                bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

                /* Setup the physical region for the FCP RSP */
                bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
                bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
                bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
                bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
                bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

                /*
                 * Since the IOCB for the FCP I/O is built into this
                 * lpfc_scsi_buf, initialize it with all known data now.
                 */
                iocb = &psb->cur_iocbq.iocb;
                iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
                if ((phba->sli_rev == 3) &&
                    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
                        /* fill in immediate fcp command BDE */
                        iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
                        iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
                        iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
                                                               unsli3.fcp_ext.icd);
                        iocb->un.fcpi64.bdl.addrHigh = 0;
                        iocb->ulpBdeCount = 0;
                        iocb->ulpLe = 0;
                        /* fill in response BDE */
                        iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
                                                        BUFF_TYPE_BDE_64;
                        iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
                                                        sizeof(struct fcp_rsp);
                        iocb->unsli3.fcp_ext.rbde.addrLow =
                                                putPaddrLow(pdma_phys_fcp_rsp);
                        iocb->unsli3.fcp_ext.rbde.addrHigh =
                                                putPaddrHigh(pdma_phys_fcp_rsp);
                } else {
                        iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
                        iocb->un.fcpi64.bdl.bdeSize =
                                        (2 * sizeof(struct ulp_bde64));
                        iocb->un.fcpi64.bdl.addrLow =
                                        putPaddrLow(pdma_phys_sgl);
                        iocb->un.fcpi64.bdl.addrHigh =
                                        putPaddrHigh(pdma_phys_sgl);
                        iocb->ulpBdeCount = 1;
                        iocb->ulpLe = 1;
                }
                iocb->ulpClass = CLASS3;
                psb->status = IOSTAT_SUCCESS;
                /* Put it back into the SCSI buffer list */
                psb->cur_iocbq.context1 = psb;
                spin_lock_init(&psb->buf_lock);
                lpfc_release_scsi_buf_s3(phba, psb);
        }

        return bcnt;
}

/**
 * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup for deletions and the cleanup
 * for an ndlp on removal.
 **/
void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_io_buf *psb, *next_psb;
        struct lpfc_sli4_hdw_queue *qp;
        unsigned long iflag = 0;
        int idx;

        if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
                return;

        spin_lock_irqsave(&phba->hbalock, iflag);
        for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
                qp = &phba->sli4_hba.hdwq[idx];

                spin_lock(&qp->abts_io_buf_list_lock);
                list_for_each_entry_safe(psb, next_psb,
                                         &qp->lpfc_abts_io_buf_list, list) {
                        if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME)
                                continue;

                        if (psb->rdata && psb->rdata->pnode &&
                            psb->rdata->pnode->vport == vport)
                                psb->rdata = NULL;
                }
                spin_unlock(&qp->abts_io_buf_list_lock);
        }
        spin_unlock_irqrestore(&phba->hbalock, iflag);
}

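/*
 * Note: buffers whose exchange (XRI) is still busy after an abort are parked
 * on the per-hdwq lpfc_abts_io_buf_list by lpfc_release_scsi_buf_s4(); the
 * handler below takes them off that list once the firmware reports the abort
 * completion, at which point the XRI and the buffer can be reused.
 */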
/**
 * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 * @idx: index into hdwq
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * FCP or NVME aborted xri.
 **/
void
lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
                         struct sli4_wcqe_xri_aborted *axri, int idx)
{
        uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
        uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
        struct lpfc_io_buf *psb, *next_psb;
        struct lpfc_sli4_hdw_queue *qp;
        unsigned long iflag = 0;
        struct lpfc_iocbq *iocbq;
        int i;
        struct lpfc_nodelist *ndlp;
        int rrq_empty = 0;
        struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;
        struct scsi_cmnd *cmd;

        if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
                return;

        qp = &phba->sli4_hba.hdwq[idx];
        spin_lock_irqsave(&phba->hbalock, iflag);
        spin_lock(&qp->abts_io_buf_list_lock);
        list_for_each_entry_safe(psb, next_psb,
                                 &qp->lpfc_abts_io_buf_list, list) {
                if (psb->cur_iocbq.sli4_xritag == xri) {
                        list_del_init(&psb->list);
                        psb->flags &= ~LPFC_SBUF_XBUSY;
                        psb->status = IOSTAT_SUCCESS;
                        if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) {
                                qp->abts_nvme_io_bufs--;
                                spin_unlock(&qp->abts_io_buf_list_lock);
                                spin_unlock_irqrestore(&phba->hbalock, iflag);
                                lpfc_sli4_nvme_xri_aborted(phba, axri, psb);
                                return;
                        }
                        qp->abts_scsi_io_bufs--;
                        spin_unlock(&qp->abts_io_buf_list_lock);

                        if (psb->rdata && psb->rdata->pnode)
                                ndlp = psb->rdata->pnode;
                        else
                                ndlp = NULL;

                        rrq_empty = list_empty(&phba->active_rrq_list);
                        spin_unlock_irqrestore(&phba->hbalock, iflag);
                        if (ndlp) {
                                lpfc_set_rrq_active(phba, ndlp,
                                        psb->cur_iocbq.sli4_lxritag, rxid, 1);
                                lpfc_sli4_abts_err_handler(phba, ndlp, axri);
                        }

                        if (phba->cfg_fcp_wait_abts_rsp) {
                                spin_lock_irqsave(&psb->buf_lock, iflag);
                                cmd = psb->pCmd;
                                psb->pCmd = NULL;
                                spin_unlock_irqrestore(&psb->buf_lock, iflag);

                                /* The sdev is not guaranteed to be valid post
                                 * scsi_done upcall.
                                 */
                                if (cmd)
                                        cmd->scsi_done(cmd);

                                /*
                                 * We expect there is an abort thread waiting
                                 * for command completion; wake up the thread.
                                 */
                                spin_lock_irqsave(&psb->buf_lock, iflag);
                                psb->cur_iocbq.iocb_flag &=
                                        ~LPFC_DRIVER_ABORTED;
                                if (psb->waitq)
                                        wake_up(psb->waitq);
                                spin_unlock_irqrestore(&psb->buf_lock, iflag);
                        }

                        lpfc_release_scsi_buf_s4(phba, psb);
                        if (rrq_empty)
                                lpfc_worker_wake_up(phba);
                        return;
                }
        }
        spin_unlock(&qp->abts_io_buf_list_lock);
        for (i = 1; i <= phba->sli.last_iotag; i++) {
                iocbq = phba->sli.iocbq_lookup[i];

                if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
                    (iocbq->iocb_flag & LPFC_IO_LIBDFC))
                        continue;
                if (iocbq->sli4_xritag != xri)
                        continue;
                psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
                psb->flags &= ~LPFC_SBUF_XBUSY;
                spin_unlock_irqrestore(&phba->hbalock, iflag);
                if (!list_empty(&pring->txq))
                        lpfc_worker_wake_up(phba);
                return;
        }
        spin_unlock_irqrestore(&phba->hbalock, iflag);
}

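/*
 * The SLI-3 buffer pool is split into a "get" list and a "put" list so that
 * allocation and release normally take different locks; only when the get
 * list runs dry is the put list spliced over, under both locks.
 */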
/**
 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_io_buf *
lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                     struct scsi_cmnd *cmnd)
{
        struct lpfc_io_buf *lpfc_cmd = NULL;
        struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
        unsigned long iflag = 0;

        spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
        list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf,
                         list);
        if (!lpfc_cmd) {
                spin_lock(&phba->scsi_buf_list_put_lock);
                list_splice(&phba->lpfc_scsi_buf_list_put,
                            &phba->lpfc_scsi_buf_list_get);
                INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
                list_remove_head(scsi_buf_list_get, lpfc_cmd,
                                 struct lpfc_io_buf, list);
                spin_unlock(&phba->scsi_buf_list_put_lock);
        }
        spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);

        if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
                atomic_inc(&ndlp->cmd_pending);
                lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
        }
        return lpfc_cmd;
}

/**
 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine removes a scsi buffer from the head of the @hdwq io_buf_list
 * and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_io_buf *
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                     struct scsi_cmnd *cmnd)
{
        struct lpfc_io_buf *lpfc_cmd;
        struct lpfc_sli4_hdw_queue *qp;
        struct sli4_sge *sgl;
        dma_addr_t pdma_phys_fcp_rsp;
        dma_addr_t pdma_phys_fcp_cmd;
        uint32_t cpu, idx;
        int tag;
        struct fcp_cmd_rsp_buf *tmp = NULL;

        cpu = raw_smp_processor_id();
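        /*
         * Pick the hardware queue: when I/O scheduling is by hardware queue,
         * derive it from the block layer's hardware context for this command;
         * otherwise fall back to the hdwq mapped to the current CPU.
         */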
        if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
                tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
                idx = blk_mq_unique_tag_to_hwq(tag);
        } else {
                idx = phba->sli4_hba.cpu_map[cpu].hdwq;
        }

        lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx,
                                   !phba->cfg_xri_rebalancing);
        if (!lpfc_cmd) {
                qp = &phba->sli4_hba.hdwq[idx];
                qp->empty_io_bufs++;
                return NULL;
        }

        /* Setup key fields in buffer that may have been changed
         * if other protocols used this buffer.
         */
        lpfc_cmd->cur_iocbq.iocb_flag = LPFC_IO_FCP;
        lpfc_cmd->prot_seg_cnt = 0;
        lpfc_cmd->seg_cnt = 0;
        lpfc_cmd->timeout = 0;
        lpfc_cmd->flags = 0;
        lpfc_cmd->start_time = jiffies;
        lpfc_cmd->waitq = NULL;
        lpfc_cmd->cpu = cpu;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        lpfc_cmd->prot_data_type = 0;
#endif
        tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
        if (!tmp) {
                lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
                return NULL;
        }

        lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
        lpfc_cmd->fcp_rsp = tmp->fcp_rsp;

        /*
         * The first two SGEs are the FCP_CMD and FCP_RSP.
         * The balance are sg list bdes.  Initialize the
         * first two and leave the rest for queuecommand.
         */
        sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
        pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
        sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
        sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
        sgl->word2 = le32_to_cpu(sgl->word2);
        bf_set(lpfc_sli4_sge_last, sgl, 0);
        sgl->word2 = cpu_to_le32(sgl->word2);
        sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
        sgl++;

        /* Setup the physical region for the FCP RSP */
        pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
        sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
        sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
        sgl->word2 = le32_to_cpu(sgl->word2);
        bf_set(lpfc_sli4_sge_last, sgl, 1);
        sgl->word2 = cpu_to_le32(sgl->word2);
        sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

        if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
                atomic_inc(&ndlp->cmd_pending);
                lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
        }
        return lpfc_cmd;
}

/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_io_buf *
lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                  struct scsi_cmnd *cmnd)
{
        return phba->lpfc_get_scsi_buf(phba, ndlp, cmnd);
}

/**
 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to the tail of the
 * @phba lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
        unsigned long iflag = 0;

        psb->seg_cnt = 0;
        psb->prot_seg_cnt = 0;

        spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
        psb->pCmd = NULL;
        psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
        list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
        spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
}

/**
 * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to the tail of the
 * @hdwq io_buf_list list. For SLI4, an XRI is tied to the scsi buffer, so
 * the buffer cannot be reused for at least RA_TOV if its exchange was
 * aborted.
 **/
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
        struct lpfc_sli4_hdw_queue *qp;
        unsigned long iflag = 0;

        psb->seg_cnt = 0;
        psb->prot_seg_cnt = 0;

        qp = psb->hdwq;
        if (psb->flags & LPFC_SBUF_XBUSY) {
                spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
                if (!phba->cfg_fcp_wait_abts_rsp)
                        psb->pCmd = NULL;
                list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
                qp->abts_scsi_io_bufs++;
                spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
        } else {
                lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
        }
}

/**
 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to the tail of the
 * @phba lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
        if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
                atomic_dec(&psb->ndlp->cmd_pending);

        psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
        phba->lpfc_release_scsi_buf(phba, psb);
}

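/*
 * Note: lpfc_fcpcmd_to_iocb() below copies the FCP_CMND payload into the
 * IOCB's immediate-command data area as big-endian 32-bit words, the byte
 * order used on the wire.
 */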
/**
 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
 * @data: A pointer to the immediate command data portion of the IOCB.
 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
 *
 * The routine copies the entire FCP command from @fcp_cmnd to @data while
 * byte swapping the data to big endian format for transmission on the wire.
 **/
static void
lpfc_fcpcmd_to_iocb(u8 *data, struct fcp_cmnd *fcp_cmnd)
{
        int i, j;

        for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
             i += sizeof(uint32_t), j++) {
                ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
        }
}

/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for the scatter-gather list of the
 * scsi cmnd field of @lpfc_cmd for a device with the SLI-3 interface spec.
 * It scans through the sg elements and formats the bdes. It also initializes
 * all IOCB fields which are dependent on the scsi command request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
        struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
        struct scatterlist *sgel = NULL;
        struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
        struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
        struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
        IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
        struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
        dma_addr_t physaddr;
        uint32_t num_bde = 0;
        int nseg, datadir = scsi_cmnd->sc_data_direction;

        /*
         * There are three possibilities here - use scatter-gather segment, use
         * the single mapping, or neither.  Start the lpfc command prep by
         * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
         * data bde entry.
         */
        bpl += 2;
        if (scsi_sg_count(scsi_cmnd)) {
                /*
                 * The driver stores the segment count returned from pci_map_sg
                 * because this is a count of dma-mappings used to map the
                 * use_sg pages.  They are not guaranteed to be the same for
                 * those architectures that implement an IOMMU.
                 */

                nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
                                  scsi_sg_count(scsi_cmnd), datadir);
                if (unlikely(!nseg))
                        return 1;

                lpfc_cmd->seg_cnt = nseg;
                if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "9064 BLKGRD: %s: Too many sg segments"
                                        " from dma_map_sg.  Config %d, seg_cnt"
                                        " %d\n", __func__, phba->cfg_sg_seg_cnt,
                                        lpfc_cmd->seg_cnt);
                        WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
                        lpfc_cmd->seg_cnt = 0;
                        scsi_dma_unmap(scsi_cmnd);
                        return 2;
                }

                /*
                 * The driver established a maximum scatter-gather segment
                 * count during probe that limits the number of sg elements
                 * in any single scsi command.  Just run through the seg_cnt
                 * and format the bde's.
                 * When using SLI-3 the driver will try to fit all the BDEs
                 * into the IOCB.  If it can't then the BDEs get added to a
                 * BPL as it does for SLI-2 mode.
                 */
                scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
                        physaddr = sg_dma_address(sgel);
                        if (phba->sli_rev == 3 &&
                            !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
                            !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
                            nseg <= LPFC_EXT_DATA_BDE_COUNT) {
                                data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
                                data_bde->tus.f.bdeSize = sg_dma_len(sgel);
                                data_bde->addrLow = putPaddrLow(physaddr);
                                data_bde->addrHigh = putPaddrHigh(physaddr);
                                data_bde++;
                        } else {
                                bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
                                bpl->tus.f.bdeSize = sg_dma_len(sgel);
                                bpl->tus.w = le32_to_cpu(bpl->tus.w);
                                bpl->addrLow =
                                        le32_to_cpu(putPaddrLow(physaddr));
                                bpl->addrHigh =
                                        le32_to_cpu(putPaddrHigh(physaddr));
                                bpl++;
                        }
                }
        }

        /*
         * Finish initializing those IOCB fields that are dependent on the
         * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
         * explicitly reinitialized and for SLI-3 the extended bde count is
         * explicitly reinitialized since all iocb memory resources are reused.
         */
        if (phba->sli_rev == 3 &&
            !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
            !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
                if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
                        /*
                         * The extended IOCB format can only fit 3 BDE or a BPL.
                         * This I/O has more than 3 BDE so the 1st data bde will
                         * be a BPL that is filled in here.
                         */
                        physaddr = lpfc_cmd->dma_handle;
                        data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
                        data_bde->tus.f.bdeSize = (num_bde *
                                                   sizeof(struct ulp_bde64));
                        physaddr += (sizeof(struct fcp_cmnd) +
                                     sizeof(struct fcp_rsp) +
                                     (2 * sizeof(struct ulp_bde64)));
                        data_bde->addrHigh = putPaddrHigh(physaddr);
                        data_bde->addrLow = putPaddrLow(physaddr);
                        /* ebde count includes the response bde and data bpl */
                        iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
                } else {
                        /* ebde count includes the response bde and data bdes */
                        iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
                }
        } else {
                iocb_cmd->un.fcpi64.bdl.bdeSize =
                        ((num_bde + 2) * sizeof(struct ulp_bde64));
                iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
        }
        fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

        /*
         * Due to difference in data length between DIF/non-DIF paths,
         * we need to set word 4 of IOCB here
         */
        iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
        lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
        return 0;
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS

/* Return BG_ERR_INIT if error injection is detected by Initiator */
#define BG_ERR_INIT     0x1
/* Return BG_ERR_TGT if error injection is detected by Target */
#define BG_ERR_TGT      0x2
/* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
#define BG_ERR_SWAP     0x10
/*
 * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
 * error injection
 */
#define BG_ERR_CHECK    0x20

/**
 * lpfc_bg_err_inject - Determine if we should inject an error
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @reftag: (out) BlockGuard reference tag for transmitted data
 * @apptag: (out) BlockGuard application tag for transmitted data
 * @new_guard: (in) Value to replace CRC with if needed
 *
 * Returns BG_ERR_* bit mask or 0 if request ignored
 **/
static int
lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                   uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
{
        struct scatterlist *sgpe;       /* s/g prot entry */
        struct lpfc_io_buf *lpfc_cmd = NULL;
        struct scsi_dif_tuple *src = NULL;
        struct lpfc_nodelist *ndlp;
        struct lpfc_rport_data *rdata;
        uint32_t op = scsi_get_prot_op(sc);
        uint32_t blksize;
        uint32_t numblks;
        u32 lba;
        int rc = 0;
        int blockoff = 0;

        if (op == SCSI_PROT_NORMAL)
                return 0;

        sgpe = scsi_prot_sglist(sc);
        lba = scsi_prot_ref_tag(sc);
        if (lba == LPFC_INVALID_REFTAG)
                return 0;

        /* First check if we need to match the LBA */
        if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
                blksize = scsi_prot_interval(sc);
                numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;

                /* Make sure we have the right LBA if one is specified */
                if (phba->lpfc_injerr_lba < (u64)lba ||
                    (phba->lpfc_injerr_lba >= (u64)(lba + numblks)))
                        return 0;
                if (sgpe) {
                        blockoff = phba->lpfc_injerr_lba - (u64)lba;
                        numblks = sg_dma_len(sgpe) /
                                sizeof(struct scsi_dif_tuple);
                        if (numblks < blockoff)
                                blockoff = numblks;
                }
        }

        /* Next check if we need to match the remote NPortID or WWPN */
        rdata = lpfc_rport_data_from_scsi_device(sc->device);
        if (rdata && rdata->pnode) {
                ndlp = rdata->pnode;

                /* Make sure we have the right NPortID if one is specified */
                if (phba->lpfc_injerr_nportid &&
                    (phba->lpfc_injerr_nportid != ndlp->nlp_DID))
                        return 0;

                /*
                 * Make sure we have the right WWPN if one is specified.
                 * wwn[0] should be a non-zero NAA in a good WWPN.
                 */
                if (phba->lpfc_injerr_wwpn.u.wwn[0] &&
                    (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
                            sizeof(struct lpfc_name)) != 0))
                        return 0;
        }

        /* Setup a ptr to the protection data if the SCSI host provides it */
        if (sgpe) {
                src = (struct scsi_dif_tuple *)sg_virt(sgpe);
                src += blockoff;
                lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble;
        }

        /* Should we change the Reference Tag */
        if (reftag) {
                if (phba->lpfc_injerr_wref_cnt) {
                        switch (op) {
                        case SCSI_PROT_WRITE_PASS:
                                if (src) {
                                        /*
                                         * For WRITE_PASS, force the error
                                         * to be sent on the wire. It should
                                         * be detected by the Target.
                                         * If blockoff != 0 error will be
                                         * inserted in middle of the IO.
                                         */
                                        lpfc_printf_log(phba, KERN_ERR,
                                                        LOG_TRACE_EVENT,
                                                "9076 BLKGRD: Injecting reftag error: "
                                                "write lba x%lx + x%x oldrefTag x%x\n",
                                                (unsigned long)lba, blockoff,
                                                be32_to_cpu(src->ref_tag));

                                        /*
                                         * Save the old ref_tag so we can
                                         * restore it on completion.
                                         */
                                        if (lpfc_cmd) {
                                                lpfc_cmd->prot_data_type =
                                                        LPFC_INJERR_REFTAG;
                                                lpfc_cmd->prot_data_segment =
                                                        src;
                                                lpfc_cmd->prot_data =
                                                        src->ref_tag;
                                        }
                                        src->ref_tag = cpu_to_be32(0xDEADBEEF);
                                        phba->lpfc_injerr_wref_cnt--;
                                        if (phba->lpfc_injerr_wref_cnt == 0) {
                                                phba->lpfc_injerr_nportid = 0;
                                                phba->lpfc_injerr_lba =
                                                        LPFC_INJERR_LBA_OFF;
                                                memset(&phba->lpfc_injerr_wwpn,
                                                       0, sizeof(struct lpfc_name));
                                        }
                                        rc = BG_ERR_TGT | BG_ERR_CHECK;

                                        break;
                                }
                                fallthrough;
                        case SCSI_PROT_WRITE_INSERT:
                                /*
                                 * For WRITE_INSERT, force the error
                                 * to be sent on the wire. It should be
                                 * detected by the Target.
                                 */
                                /* DEADBEEF will be the reftag on the wire */
                                *reftag = 0xDEADBEEF;
                                phba->lpfc_injerr_wref_cnt--;
                                if (phba->lpfc_injerr_wref_cnt == 0) {
                                        phba->lpfc_injerr_nportid = 0;
                                        phba->lpfc_injerr_lba =
                                                LPFC_INJERR_LBA_OFF;
                                        memset(&phba->lpfc_injerr_wwpn,
                                               0, sizeof(struct lpfc_name));
                                }
                                rc = BG_ERR_TGT | BG_ERR_CHECK;

                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "9078 BLKGRD: Injecting reftag error: "
                                        "write lba x%lx\n", (unsigned long)lba);
                                break;
                        case SCSI_PROT_WRITE_STRIP:
                                /*
                                 * For WRITE_STRIP and WRITE_PASS,
                                 * force the error on data
                                 * being copied from SLI-Host to SLI-Port.
                                 */
                                *reftag = 0xDEADBEEF;
                                phba->lpfc_injerr_wref_cnt--;
                                if (phba->lpfc_injerr_wref_cnt == 0) {
                                        phba->lpfc_injerr_nportid = 0;
                                        phba->lpfc_injerr_lba =
                                                LPFC_INJERR_LBA_OFF;
                                        memset(&phba->lpfc_injerr_wwpn,
                                               0, sizeof(struct lpfc_name));
                                }
                                rc = BG_ERR_INIT;

                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "9077 BLKGRD: Injecting reftag error: "
                                        "write lba x%lx\n", (unsigned long)lba);
                                break;
                        }
                }
                if (phba->lpfc_injerr_rref_cnt) {
                        switch (op) {
                        case SCSI_PROT_READ_INSERT:
                        case SCSI_PROT_READ_STRIP:
                        case SCSI_PROT_READ_PASS:
                                /*
                                 * For READ_STRIP and READ_PASS, force the
                                 * error on data being read off the wire. It
                                 * should force an IO error to the driver.
                                 */
                                *reftag = 0xDEADBEEF;
                                phba->lpfc_injerr_rref_cnt--;
                                if (phba->lpfc_injerr_rref_cnt == 0) {
                                        phba->lpfc_injerr_nportid = 0;
                                        phba->lpfc_injerr_lba =
                                                LPFC_INJERR_LBA_OFF;
                                        memset(&phba->lpfc_injerr_wwpn,
                                               0, sizeof(struct lpfc_name));
                                }
                                rc = BG_ERR_INIT;

                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "9079 BLKGRD: Injecting reftag error: "
                                        "read lba x%lx\n", (unsigned long)lba);
                                break;
                        }
                }
        }

        /* Should we change the Application Tag */
        if (apptag) {
                if (phba->lpfc_injerr_wapp_cnt) {
                        switch (op) {
                        case SCSI_PROT_WRITE_PASS:
                                if (src) {
                                        /*
                                         * For WRITE_PASS, force the error
                                         * to be sent on the wire. It should
                                         * be detected by the Target.
                                         * If blockoff != 0 error will be
                                         * inserted in middle of the IO.
                                         */
                                        lpfc_printf_log(phba, KERN_ERR,
                                                        LOG_TRACE_EVENT,
                                                "9080 BLKGRD: Injecting apptag error: "
                                                "write lba x%lx + x%x oldappTag x%x\n",
                                                (unsigned long)lba, blockoff,
                                                be16_to_cpu(src->app_tag));

                                        /*
                                         * Save the old app_tag so we can
                                         * restore it on completion.
                                         */
                                        if (lpfc_cmd) {
                                                lpfc_cmd->prot_data_type =
                                                        LPFC_INJERR_APPTAG;
                                                lpfc_cmd->prot_data_segment =
                                                        src;
                                                lpfc_cmd->prot_data =
                                                        src->app_tag;
                                        }
                                        src->app_tag = cpu_to_be16(0xDEAD);
                                        phba->lpfc_injerr_wapp_cnt--;
                                        if (phba->lpfc_injerr_wapp_cnt == 0) {
                                                phba->lpfc_injerr_nportid = 0;
                                                phba->lpfc_injerr_lba =
                                                        LPFC_INJERR_LBA_OFF;
                                                memset(&phba->lpfc_injerr_wwpn,
                                                       0, sizeof(struct lpfc_name));
                                        }
                                        rc = BG_ERR_TGT | BG_ERR_CHECK;
                                        break;
                                }
                                fallthrough;
                        case SCSI_PROT_WRITE_INSERT:
                                /*
                                 * For WRITE_INSERT, force the
                                 * error to be sent on the wire. It should be
                                 * detected by the Target.
                                 */
                                /* DEAD will be the apptag on the wire */
                                *apptag = 0xDEAD;
                                phba->lpfc_injerr_wapp_cnt--;
                                if (phba->lpfc_injerr_wapp_cnt == 0) {
                                        phba->lpfc_injerr_nportid = 0;
                                        phba->lpfc_injerr_lba =
                                                LPFC_INJERR_LBA_OFF;
                                        memset(&phba->lpfc_injerr_wwpn,
                                               0, sizeof(struct lpfc_name));
                                }
                                rc = BG_ERR_TGT | BG_ERR_CHECK;

                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "0813 BLKGRD: Injecting apptag error: "
                                        "write lba x%lx\n", (unsigned long)lba);
                                break;
                        case SCSI_PROT_WRITE_STRIP:
                                /*
                                 * For WRITE_STRIP and WRITE_PASS,
                                 * force the error on data
                                 * being copied from SLI-Host to SLI-Port.
                                 */
                                *apptag = 0xDEAD;
                                phba->lpfc_injerr_wapp_cnt--;
                                if (phba->lpfc_injerr_wapp_cnt == 0) {
                                        phba->lpfc_injerr_nportid = 0;
                                        phba->lpfc_injerr_lba =
                                                LPFC_INJERR_LBA_OFF;
                                        memset(&phba->lpfc_injerr_wwpn,
                                               0, sizeof(struct lpfc_name));
                                }
                                rc = BG_ERR_INIT;

                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "0812 BLKGRD: Injecting apptag error: "
                                        "write lba x%lx\n", (unsigned long)lba);
                                break;
                        }
                }
                if (phba->lpfc_injerr_rapp_cnt) {
                        switch (op) {
                        case SCSI_PROT_READ_INSERT:
                        case SCSI_PROT_READ_STRIP:
                        case SCSI_PROT_READ_PASS:
                                /*
                                 * For READ_STRIP and READ_PASS, force the
                                 * error on data being read off the wire. It
                                 * should force an IO error to the driver.
                                 */
                                *apptag = 0xDEAD;
                                phba->lpfc_injerr_rapp_cnt--;
                                if (phba->lpfc_injerr_rapp_cnt == 0) {
                                        phba->lpfc_injerr_nportid = 0;
                                        phba->lpfc_injerr_lba =
                                                LPFC_INJERR_LBA_OFF;
                                        memset(&phba->lpfc_injerr_wwpn,
                                               0, sizeof(struct lpfc_name));
                                }
                                rc = BG_ERR_INIT;

                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "0814 BLKGRD: Injecting apptag error: "
                                        "read lba x%lx\n", (unsigned long)lba);
                                break;
                        }
                }
        }

        /* Should we change the Guard Tag */
        if (new_guard) {
                if (phba->lpfc_injerr_wgrd_cnt) {
                        switch (op) {
                        case SCSI_PROT_WRITE_PASS:
                                rc = BG_ERR_CHECK;
                                fallthrough;

                        case SCSI_PROT_WRITE_INSERT:
                                /*
                                 * For WRITE_INSERT, force the
                                 * error to be sent on the wire. It should be
                                 * detected by the Target.
                                 */
                                phba->lpfc_injerr_wgrd_cnt--;
                                if (phba->lpfc_injerr_wgrd_cnt == 0) {
                                        phba->lpfc_injerr_nportid = 0;
                                        phba->lpfc_injerr_lba =
                                                LPFC_INJERR_LBA_OFF;
                                        memset(&phba->lpfc_injerr_wwpn,
                                               0, sizeof(struct lpfc_name));
                                }

                                rc |= BG_ERR_TGT | BG_ERR_SWAP;
                                /* Signals the caller to swap CRC->CSUM */

                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "0817 BLKGRD: Injecting guard error: "
                                        "write lba x%lx\n", (unsigned long)lba);
                                break;
                        case SCSI_PROT_WRITE_STRIP:
                                /*
                                 * For WRITE_STRIP and WRITE_PASS,
                                 * force the error on data
                                 * being copied from SLI-Host to SLI-Port.
                                 */
                                phba->lpfc_injerr_wgrd_cnt--;
                                if (phba->lpfc_injerr_wgrd_cnt == 0) {
                                        phba->lpfc_injerr_nportid = 0;
                                        phba->lpfc_injerr_lba =
                                                LPFC_INJERR_LBA_OFF;
                                        memset(&phba->lpfc_injerr_wwpn,
                                               0, sizeof(struct lpfc_name));
                                }

                                rc = BG_ERR_INIT | BG_ERR_SWAP;
                                /* Signals the caller to swap CRC->CSUM */

                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "0816 BLKGRD: Injecting guard error: "
                                        "write lba x%lx\n", (unsigned long)lba);
                                break;
                        }
                }
                if (phba->lpfc_injerr_rgrd_cnt) {
                        switch (op) {
                        case SCSI_PROT_READ_INSERT:
                        case SCSI_PROT_READ_STRIP:
                        case SCSI_PROT_READ_PASS:
                                /*
                                 * For READ_STRIP and READ_PASS, force the
                                 * error on data being read off the wire. It
                                 * should force an IO error to the driver.
                                 */
                                phba->lpfc_injerr_rgrd_cnt--;
                                if (phba->lpfc_injerr_rgrd_cnt == 0) {
                                        phba->lpfc_injerr_nportid = 0;
                                        phba->lpfc_injerr_lba =
                                                LPFC_INJERR_LBA_OFF;
                                        memset(&phba->lpfc_injerr_wwpn,
                                               0, sizeof(struct lpfc_name));
                                }

                                rc = BG_ERR_INIT | BG_ERR_SWAP;
                                /* Signals the caller to swap CRC->CSUM */

                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "0818 BLKGRD: Injecting guard error: "
                                        "read lba x%lx\n", (unsigned long)lba);
                        }
                }
        }

        return rc;
}
#endif

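/*
 * Note on the BG_OP_IN_*_OUT_* opcode pairs chosen below: whenever protection
 * data travels on the wire it carries the T10 CRC guard; when the SCSI layer
 * asks for IP-checksum guards (SCSI_PROT_IP_CHECKSUM), the host-memory side
 * uses the checksum and the HBA converts between the two forms.
 */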
/**
 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
 * the specified SCSI command.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 *
 **/
static int
lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                      uint8_t *txop, uint8_t *rxop)
{
        uint8_t ret = 0;

        if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
                switch (scsi_get_prot_op(sc)) {
                case SCSI_PROT_READ_INSERT:
                case SCSI_PROT_WRITE_STRIP:
                        *rxop = BG_OP_IN_NODIF_OUT_CSUM;
                        *txop = BG_OP_IN_CSUM_OUT_NODIF;
                        break;

                case SCSI_PROT_READ_STRIP:
                case SCSI_PROT_WRITE_INSERT:
                        *rxop = BG_OP_IN_CRC_OUT_NODIF;
                        *txop = BG_OP_IN_NODIF_OUT_CRC;
                        break;

                case SCSI_PROT_READ_PASS:
                case SCSI_PROT_WRITE_PASS:
                        *rxop = BG_OP_IN_CRC_OUT_CSUM;
                        *txop = BG_OP_IN_CSUM_OUT_CRC;
                        break;

                case SCSI_PROT_NORMAL:
                default:
                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "9063 BLKGRD: Bad op/guard:%d/IP combination\n",
                                scsi_get_prot_op(sc));
                        ret = 1;
                        break;

                }
        } else {
                switch (scsi_get_prot_op(sc)) {
                case SCSI_PROT_READ_STRIP:
                case SCSI_PROT_WRITE_INSERT:
                        *rxop = BG_OP_IN_CRC_OUT_NODIF;
                        *txop = BG_OP_IN_NODIF_OUT_CRC;
                        break;

                case SCSI_PROT_READ_PASS:
                case SCSI_PROT_WRITE_PASS:
                        *rxop = BG_OP_IN_CRC_OUT_CRC;
                        *txop = BG_OP_IN_CRC_OUT_CRC;
                        break;

                case SCSI_PROT_READ_INSERT:
                case SCSI_PROT_WRITE_STRIP:
                        *rxop = BG_OP_IN_NODIF_OUT_CRC;
                        *txop = BG_OP_IN_CRC_OUT_NODIF;
                        break;

                case SCSI_PROT_NORMAL:
                default:
                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
                                scsi_get_prot_op(sc));
                        ret = 1;
                        break;
                }
        }

        return ret;
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
/**
 * lpfc_bg_err_opcodes - Re-determine the BlockGuard opcodes to be used with
 * the specified SCSI command in order to force a guard tag error.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 *
 **/
static int
lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                    uint8_t *txop, uint8_t *rxop)
{
        if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
                switch (scsi_get_prot_op(sc)) {
                case SCSI_PROT_READ_INSERT:
                case SCSI_PROT_WRITE_STRIP:
                        *rxop = BG_OP_IN_NODIF_OUT_CRC;
                        *txop = BG_OP_IN_CRC_OUT_NODIF;
                        break;

                case SCSI_PROT_READ_STRIP:
                case SCSI_PROT_WRITE_INSERT:
                        *rxop = BG_OP_IN_CSUM_OUT_NODIF;
                        *txop = BG_OP_IN_NODIF_OUT_CSUM;
                        break;

                case SCSI_PROT_READ_PASS:
                case SCSI_PROT_WRITE_PASS:
                        *rxop = BG_OP_IN_CSUM_OUT_CRC;
                        *txop = BG_OP_IN_CRC_OUT_CSUM;
                        break;

                case SCSI_PROT_NORMAL:
                default:
                        break;

                }
        } else {
                switch (scsi_get_prot_op(sc)) {
                case SCSI_PROT_READ_STRIP:
                case SCSI_PROT_WRITE_INSERT:
                        *rxop = BG_OP_IN_CSUM_OUT_NODIF;
                        *txop = BG_OP_IN_NODIF_OUT_CSUM;
                        break;

                case SCSI_PROT_READ_PASS:
                case SCSI_PROT_WRITE_PASS:
                        *rxop = BG_OP_IN_CSUM_OUT_CSUM;
                        *txop = BG_OP_IN_CSUM_OUT_CSUM;
                        break;

                case SCSI_PROT_READ_INSERT:
                case SCSI_PROT_WRITE_STRIP:
                        *rxop = BG_OP_IN_NODIF_OUT_CSUM;
                        *txop = BG_OP_IN_CSUM_OUT_NODIF;
                        break;

                case SCSI_PROT_NORMAL:
                default:
                        break;
                }
        }

        return 0;
}
#endif

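/*
 * In the SLI-3 protection BPLs built below, PDE5 seeds the reference tag,
 * PDE6 carries the tx/rx BlockGuard opcodes and the guard/ref check-enable
 * bits, and PDE7 (used when protection data sits in its own buffers) points
 * at the DIF tuples themselves.
 */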
/**
 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 *
 * This function sets up BPL buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into data stream (or strip DIF from
 * incoming data stream)
 *
 * The buffer list consists of just one protection group described
 * below:
 *                                +-------------------------+
 *   start of prot group  -->     |          PDE_5          |
 *                                +-------------------------+
 *                                |          PDE_6          |
 *                                +-------------------------+
 *                                |         Data BDE        |
 *                                +-------------------------+
 *                                |more Data BDE's ... (opt)|
 *                                +-------------------------+
 *
 *
 * Note: Data s/g buffers have been dma mapped
 *
 * Returns the number of BDEs added to the BPL.
 **/
static int
lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                  struct ulp_bde64 *bpl, int datasegcnt)
{
        struct scatterlist *sgde = NULL; /* s/g data entry */
        struct lpfc_pde5 *pde5 = NULL;
        struct lpfc_pde6 *pde6 = NULL;
        dma_addr_t physaddr;
        int i = 0, num_bde = 0, status;
        int datadir = sc->sc_data_direction;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        uint32_t rc;
#endif
        uint32_t checking = 1;
        uint32_t reftag;
        uint8_t txop, rxop;

        status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
        if (status)
                goto out;

        /* extract some info from the scsi command for pde*/
        reftag = scsi_prot_ref_tag(sc);
        if (reftag == LPFC_INVALID_REFTAG)
                goto out;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
        if (rc) {
                if (rc & BG_ERR_SWAP)
                        lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
                if (rc & BG_ERR_CHECK)
                        checking = 0;
        }
#endif

        /* setup PDE5 with what we have */
        pde5 = (struct lpfc_pde5 *) bpl;
        memset(pde5, 0, sizeof(struct lpfc_pde5));
        bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

        /* Endianness conversion if necessary for PDE5 */
        pde5->word0 = cpu_to_le32(pde5->word0);
        pde5->reftag = cpu_to_le32(reftag);

        /* advance bpl and increment bde count */
        num_bde++;
        bpl++;
        pde6 = (struct lpfc_pde6 *) bpl;

        /* setup PDE6 with the rest of the info */
        memset(pde6, 0, sizeof(struct lpfc_pde6));
        bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
        bf_set(pde6_optx, pde6, txop);
        bf_set(pde6_oprx, pde6, rxop);

        /*
         * We only need to check the data on READs, for WRITEs
         * protection data is automatically generated, not checked.
         */
        if (datadir == DMA_FROM_DEVICE) {
                if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
                        bf_set(pde6_ce, pde6, checking);
                else
                        bf_set(pde6_ce, pde6, 0);

                if (sc->prot_flags & SCSI_PROT_REF_CHECK)
                        bf_set(pde6_re, pde6, checking);
                else
                        bf_set(pde6_re, pde6, 0);
        }
        bf_set(pde6_ai, pde6, 1);
        bf_set(pde6_ae, pde6, 0);
        bf_set(pde6_apptagval, pde6, 0);

        /* Endianness conversion if necessary for PDE6 */
        pde6->word0 = cpu_to_le32(pde6->word0);
        pde6->word1 = cpu_to_le32(pde6->word1);
        pde6->word2 = cpu_to_le32(pde6->word2);

        /* advance bpl and increment bde count */
        num_bde++;
        bpl++;

        /* assumption: caller has already run dma_map_sg on command data */
        scsi_for_each_sg(sc, sgde, datasegcnt, i) {
                physaddr = sg_dma_address(sgde);
                bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
                bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
                bpl->tus.f.bdeSize = sg_dma_len(sgde);
                if (datadir == DMA_TO_DEVICE)
                        bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
                else
                        bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
                bpl->tus.w = le32_to_cpu(bpl->tus.w);
                bpl++;
                num_bde++;
        }

out:
        return num_bde;
}

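/*
 * Note: a protection-data BDE must not cross a 4 KiB boundary. When it would,
 * lpfc_bg_setup_bpl_prot() below splits the protection group at the boundary
 * and continues it with a fresh PDE5/PDE6/PDE7 set on the next pass of its
 * loop (protgroup_offset tracks the split).
 */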
/**
 * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segment of protection data that have been dma mapped
 *
 * This function sets up BPL buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF
 *
 * This is usually used when DIFs are in their own buffers,
 * separate from the data. The HBA can then be instructed
 * to place the DIFs in the outgoing stream.  For read operations,
 * the HBA could extract the DIFs and place them in DIF buffers.
 *
 * The buffer list for this type consists of one or more of the
 * protection groups described below:
 *                                    +-------------------------+
 *   start of first prot group  -->   |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          PDE_6          |
 *                                    +-------------------------+
 *                                    |      PDE_7 (Prot BDE)   |
 *                                    +-------------------------+
 *                                    |        Data BDE         |
 *                                    +-------------------------+
 *                                    |more Data BDE's ... (opt)|
 *                                    +-------------------------+
 *   start of new  prot group  -->    |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          ...            |
 *                                    +-------------------------+
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       mapped for DMA
 *
 * Returns the number of BDEs added to the BPL.
 **/
static int
lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                       struct ulp_bde64 *bpl, int datacnt, int protcnt)
{
        struct scatterlist *sgde = NULL; /* s/g data entry */
        struct scatterlist *sgpe = NULL; /* s/g prot entry */
        struct lpfc_pde5 *pde5 = NULL;
        struct lpfc_pde6 *pde6 = NULL;
        struct lpfc_pde7 *pde7 = NULL;
        dma_addr_t dataphysaddr, protphysaddr;
        unsigned short curr_data = 0, curr_prot = 0;
        unsigned int split_offset;
        unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
        unsigned int protgrp_blks, protgrp_bytes;
        unsigned int remainder, subtotal;
        int status;
        int datadir = sc->sc_data_direction;
        unsigned char pgdone = 0, alldone = 0;
        unsigned blksize;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        uint32_t rc;
#endif
        uint32_t checking = 1;
        uint32_t reftag;
        uint8_t txop, rxop;
        int num_bde = 0;

        sgpe = scsi_prot_sglist(sc);
        sgde = scsi_sglist(sc);

        if (!sgpe || !sgde) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "9020 Invalid s/g entry: data=x%px prot=x%px\n",
                                sgpe, sgde);
                return 0;
        }

        status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
        if (status)
                goto out;

        /* extract some info from the scsi command */
        blksize = scsi_prot_interval(sc);
        reftag = scsi_prot_ref_tag(sc);
        if (reftag == LPFC_INVALID_REFTAG)
                goto out;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
        if (rc) {
                if (rc & BG_ERR_SWAP)
                        lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
                if (rc & BG_ERR_CHECK)
                        checking = 0;
        }
#endif

necessary for PDE5 */ 1796 pde5->word0 = cpu_to_le32(pde5->word0); 1797 pde5->reftag = cpu_to_le32(reftag); 1798 1799 /* advance bpl and increment bde count */ 1800 num_bde++; 1801 bpl++; 1802 pde6 = (struct lpfc_pde6 *) bpl; 1803 1804 /* setup PDE6 with the rest of the info */ 1805 memset(pde6, 0, sizeof(struct lpfc_pde6)); 1806 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); 1807 bf_set(pde6_optx, pde6, txop); 1808 bf_set(pde6_oprx, pde6, rxop); 1809 1810 if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) 1811 bf_set(pde6_ce, pde6, checking); 1812 else 1813 bf_set(pde6_ce, pde6, 0); 1814 1815 if (sc->prot_flags & SCSI_PROT_REF_CHECK) 1816 bf_set(pde6_re, pde6, checking); 1817 else 1818 bf_set(pde6_re, pde6, 0); 1819 1820 bf_set(pde6_ai, pde6, 1); 1821 bf_set(pde6_ae, pde6, 0); 1822 bf_set(pde6_apptagval, pde6, 0); 1823 1824 /* Endianness conversion if necessary for PDE6 */ 1825 pde6->word0 = cpu_to_le32(pde6->word0); 1826 pde6->word1 = cpu_to_le32(pde6->word1); 1827 pde6->word2 = cpu_to_le32(pde6->word2); 1828 1829 /* advance bpl and increment bde count */ 1830 num_bde++; 1831 bpl++; 1832 1833 /* setup the first BDE that points to protection buffer */ 1834 protphysaddr = sg_dma_address(sgpe) + protgroup_offset; 1835 protgroup_len = sg_dma_len(sgpe) - protgroup_offset; 1836 1837 /* must be integer multiple of the DIF block length */ 1838 BUG_ON(protgroup_len % 8); 1839 1840 pde7 = (struct lpfc_pde7 *) bpl; 1841 memset(pde7, 0, sizeof(struct lpfc_pde7)); 1842 bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR); 1843 1844 pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr)); 1845 pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr)); 1846 1847 protgrp_blks = protgroup_len / 8; 1848 protgrp_bytes = protgrp_blks * blksize; 1849 1850 /* check if this pde is crossing the 4K boundary; if so split */ 1851 if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) { 1852 protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff); 1853 protgroup_offset += protgroup_remainder; 1854 protgrp_blks = protgroup_remainder / 8; 1855 protgrp_bytes = protgrp_blks * blksize; 1856 } else { 1857 protgroup_offset = 0; 1858 curr_prot++; 1859 } 1860 1861 num_bde++; 1862 1863 /* setup BDE's for data blocks associated with DIF data */ 1864 pgdone = 0; 1865 subtotal = 0; /* total bytes processed for current prot grp */ 1866 while (!pgdone) { 1867 /* Check to see if we ran out of space */ 1868 if (num_bde >= phba->cfg_total_seg_cnt) 1869 return num_bde + 1; 1870 1871 if (!sgde) { 1872 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1873 "9065 BLKGRD:%s Invalid data segment\n", 1874 __func__); 1875 return 0; 1876 } 1877 bpl++; 1878 dataphysaddr = sg_dma_address(sgde) + split_offset; 1879 bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr)); 1880 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr)); 1881 1882 remainder = sg_dma_len(sgde) - split_offset; 1883 1884 if ((subtotal + remainder) <= protgrp_bytes) { 1885 /* we can use this whole buffer */ 1886 bpl->tus.f.bdeSize = remainder; 1887 split_offset = 0; 1888 1889 if ((subtotal + remainder) == protgrp_bytes) 1890 pgdone = 1; 1891 } else { 1892 /* must split this buffer with next prot grp */ 1893 bpl->tus.f.bdeSize = protgrp_bytes - subtotal; 1894 split_offset += bpl->tus.f.bdeSize; 1895 } 1896 1897 subtotal += bpl->tus.f.bdeSize; 1898 1899 if (datadir == DMA_TO_DEVICE) 1900 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1901 else 1902 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; 1903 bpl->tus.w = le32_to_cpu(bpl->tus.w); 1904 1905 num_bde++; 1906 curr_data++; 1907 1908 if (split_offset) 
1909 break; 1910 1911 /* Move to the next s/g segment if possible */ 1912 sgde = sg_next(sgde); 1913 1914 } 1915 1916 if (protgroup_offset) { 1917 /* update the reference tag */ 1918 reftag += protgrp_blks; 1919 bpl++; 1920 continue; 1921 } 1922 1923 /* are we done ? */ 1924 if (curr_prot == protcnt) { 1925 alldone = 1; 1926 } else if (curr_prot < protcnt) { 1927 /* advance to next prot buffer */ 1928 sgpe = sg_next(sgpe); 1929 bpl++; 1930 1931 /* update the reference tag */ 1932 reftag += protgrp_blks; 1933 } else { 1934 /* if we're here, we have a bug */ 1935 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1936 "9054 BLKGRD: bug in %s\n", __func__); 1937 } 1938 1939 } while (!alldone); 1940 out: 1941 1942 return num_bde; 1943 } 1944 1945 /** 1946 * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data 1947 * @phba: The Hba for which this call is being executed. 1948 * @sc: pointer to scsi command we're working on 1949 * @sgl: pointer to buffer list for protection groups 1950 * @datasegcnt: number of segments of data that have been dma mapped 1951 * @lpfc_cmd: lpfc scsi command object pointer. 1952 * 1953 * This function sets up SGL buffer list for protection groups of 1954 * type LPFC_PG_TYPE_NO_DIF 1955 * 1956 * This is usually used when the HBA is instructed to generate 1957 * DIFs and insert them into data stream (or strip DIF from 1958 * incoming data stream) 1959 * 1960 * The buffer list consists of just one protection group described 1961 * below: 1962 * +-------------------------+ 1963 * start of prot group --> | DI_SEED | 1964 * +-------------------------+ 1965 * | Data SGE | 1966 * +-------------------------+ 1967 * |more Data SGE's ... (opt)| 1968 * +-------------------------+ 1969 * 1970 * 1971 * Note: Data s/g buffers have been dma mapped 1972 * 1973 * Returns the number of SGEs added to the SGL. 1974 **/ 1975 static int 1976 lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1977 struct sli4_sge *sgl, int datasegcnt, 1978 struct lpfc_io_buf *lpfc_cmd) 1979 { 1980 struct scatterlist *sgde = NULL; /* s/g data entry */ 1981 struct sli4_sge_diseed *diseed = NULL; 1982 dma_addr_t physaddr; 1983 int i = 0, num_sge = 0, status; 1984 uint32_t reftag; 1985 uint8_t txop, rxop; 1986 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1987 uint32_t rc; 1988 #endif 1989 uint32_t checking = 1; 1990 uint32_t dma_len; 1991 uint32_t dma_offset = 0; 1992 struct sli4_hybrid_sgl *sgl_xtra = NULL; 1993 int j; 1994 bool lsp_just_set = false; 1995 1996 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); 1997 if (status) 1998 goto out; 1999 2000 /* extract some info from the scsi command for pde*/ 2001 reftag = scsi_prot_ref_tag(sc); 2002 if (reftag == LPFC_INVALID_REFTAG) 2003 goto out; 2004 2005 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 2006 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); 2007 if (rc) { 2008 if (rc & BG_ERR_SWAP) 2009 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); 2010 if (rc & BG_ERR_CHECK) 2011 checking = 0; 2012 } 2013 #endif 2014 2015 /* setup DISEED with what we have */ 2016 diseed = (struct sli4_sge_diseed *) sgl; 2017 memset(diseed, 0, sizeof(struct sli4_sge_diseed)); 2018 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED); 2019 2020 /* Endianness conversion if necessary */ 2021 diseed->ref_tag = cpu_to_le32(reftag); 2022 diseed->ref_tag_tran = diseed->ref_tag; 2023 2024 /* 2025 * We only need to check the data on READs, for WRITEs 2026 * protection data is automatically generated, not checked. 
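 * For reads (DMA_FROM_DEVICE) the check-enable bits below follow the
 * command's SCSI_PROT_GUARD_CHECK and SCSI_PROT_REF_CHECK prot_flags;
 * for writes they are left clear and the HBA generates the DIF fields
 * itself.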
2027 */ 2028 if (sc->sc_data_direction == DMA_FROM_DEVICE) { 2029 if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) 2030 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); 2031 else 2032 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); 2033 2034 if (sc->prot_flags & SCSI_PROT_REF_CHECK) 2035 bf_set(lpfc_sli4_sge_dif_re, diseed, checking); 2036 else 2037 bf_set(lpfc_sli4_sge_dif_re, diseed, 0); 2038 } 2039 2040 /* setup DISEED with the rest of the info */ 2041 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); 2042 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); 2043 2044 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); 2045 bf_set(lpfc_sli4_sge_dif_me, diseed, 0); 2046 2047 /* Endianness conversion if necessary for DISEED */ 2048 diseed->word2 = cpu_to_le32(diseed->word2); 2049 diseed->word3 = cpu_to_le32(diseed->word3); 2050 2051 /* advance bpl and increment sge count */ 2052 num_sge++; 2053 sgl++; 2054 2055 /* assumption: caller has already run dma_map_sg on command data */ 2056 sgde = scsi_sglist(sc); 2057 j = 3; 2058 for (i = 0; i < datasegcnt; i++) { 2059 /* clear it */ 2060 sgl->word2 = 0; 2061 2062 /* do we need to expand the segment */ 2063 if (!lsp_just_set && !((j + 1) % phba->border_sge_num) && 2064 ((datasegcnt - 1) != i)) { 2065 /* set LSP type */ 2066 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP); 2067 2068 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd); 2069 2070 if (unlikely(!sgl_xtra)) { 2071 lpfc_cmd->seg_cnt = 0; 2072 return 0; 2073 } 2074 sgl->addr_lo = cpu_to_le32(putPaddrLow( 2075 sgl_xtra->dma_phys_sgl)); 2076 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 2077 sgl_xtra->dma_phys_sgl)); 2078 2079 } else { 2080 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); 2081 } 2082 2083 if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) { 2084 if ((datasegcnt - 1) == i) 2085 bf_set(lpfc_sli4_sge_last, sgl, 1); 2086 physaddr = sg_dma_address(sgde); 2087 dma_len = sg_dma_len(sgde); 2088 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); 2089 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); 2090 2091 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 2092 sgl->word2 = cpu_to_le32(sgl->word2); 2093 sgl->sge_len = cpu_to_le32(dma_len); 2094 2095 dma_offset += dma_len; 2096 sgde = sg_next(sgde); 2097 2098 sgl++; 2099 num_sge++; 2100 lsp_just_set = false; 2101 2102 } else { 2103 sgl->word2 = cpu_to_le32(sgl->word2); 2104 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size); 2105 2106 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 2107 i = i - 1; 2108 2109 lsp_just_set = true; 2110 } 2111 2112 j++; 2113 2114 } 2115 2116 out: 2117 return num_sge; 2118 } 2119 2120 /** 2121 * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data 2122 * @phba: The Hba for which this call is being executed. 2123 * @sc: pointer to scsi command we're working on 2124 * @sgl: pointer to buffer list for protection groups 2125 * @datacnt: number of segments of data that have been dma mapped 2126 * @protcnt: number of segment of protection data that have been dma mapped 2127 * @lpfc_cmd: lpfc scsi command object pointer. 2128 * 2129 * This function sets up SGL buffer list for protection groups of 2130 * type LPFC_PG_TYPE_DIF 2131 * 2132 * This is usually used when DIFs are in their own buffers, 2133 * separate from the data. The HBA can then by instructed 2134 * to place the DIFs in the outgoing stream. For read operations, 2135 * The HBA could extract the DIFs and place it in DIF buffers. 
2136 * 2137 * The buffer list for this type consists of one or more of the 2138 * protection groups described below: 2139 * +-------------------------+ 2140 * start of first prot group --> | DISEED | 2141 * +-------------------------+ 2142 * | DIF (Prot SGE) | 2143 * +-------------------------+ 2144 * | Data SGE | 2145 * +-------------------------+ 2146 * |more Data SGE's ... (opt)| 2147 * +-------------------------+ 2148 * start of new prot group --> | DISEED | 2149 * +-------------------------+ 2150 * | ... | 2151 * +-------------------------+ 2152 * 2153 * Note: It is assumed that both data and protection s/g buffers have been 2154 * mapped for DMA 2155 * 2156 * Returns the number of SGEs added to the SGL. 2157 **/ 2158 static int 2159 lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, 2160 struct sli4_sge *sgl, int datacnt, int protcnt, 2161 struct lpfc_io_buf *lpfc_cmd) 2162 { 2163 struct scatterlist *sgde = NULL; /* s/g data entry */ 2164 struct scatterlist *sgpe = NULL; /* s/g prot entry */ 2165 struct sli4_sge_diseed *diseed = NULL; 2166 dma_addr_t dataphysaddr, protphysaddr; 2167 unsigned short curr_data = 0, curr_prot = 0; 2168 unsigned int split_offset; 2169 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder; 2170 unsigned int protgrp_blks, protgrp_bytes; 2171 unsigned int remainder, subtotal; 2172 int status; 2173 unsigned char pgdone = 0, alldone = 0; 2174 unsigned blksize; 2175 uint32_t reftag; 2176 uint8_t txop, rxop; 2177 uint32_t dma_len; 2178 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 2179 uint32_t rc; 2180 #endif 2181 uint32_t checking = 1; 2182 uint32_t dma_offset = 0; 2183 int num_sge = 0, j = 2; 2184 struct sli4_hybrid_sgl *sgl_xtra = NULL; 2185 2186 sgpe = scsi_prot_sglist(sc); 2187 sgde = scsi_sglist(sc); 2188 2189 if (!sgpe || !sgde) { 2190 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2191 "9082 Invalid s/g entry: data=x%px prot=x%px\n", 2192 sgpe, sgde); 2193 return 0; 2194 } 2195 2196 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); 2197 if (status) 2198 goto out; 2199 2200 /* extract some info from the scsi command */ 2201 blksize = scsi_prot_interval(sc); 2202 reftag = scsi_prot_ref_tag(sc); 2203 if (reftag == LPFC_INVALID_REFTAG) 2204 goto out; 2205 2206 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 2207 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); 2208 if (rc) { 2209 if (rc & BG_ERR_SWAP) 2210 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); 2211 if (rc & BG_ERR_CHECK) 2212 checking = 0; 2213 } 2214 #endif 2215 2216 split_offset = 0; 2217 do { 2218 /* Check to see if we ran out of space */ 2219 if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) && 2220 !(phba->cfg_xpsgl)) 2221 return num_sge + 3; 2222 2223 /* DISEED and DIF have to be together */ 2224 if (!((j + 1) % phba->border_sge_num) || 2225 !((j + 2) % phba->border_sge_num) || 2226 !((j + 3) % phba->border_sge_num)) { 2227 sgl->word2 = 0; 2228 2229 /* set LSP type */ 2230 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP); 2231 2232 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd); 2233 2234 if (unlikely(!sgl_xtra)) { 2235 goto out; 2236 } else { 2237 sgl->addr_lo = cpu_to_le32(putPaddrLow( 2238 sgl_xtra->dma_phys_sgl)); 2239 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 2240 sgl_xtra->dma_phys_sgl)); 2241 } 2242 2243 sgl->word2 = cpu_to_le32(sgl->word2); 2244 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size); 2245 2246 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 2247 j = 0; 2248 } 2249 2250 /* setup DISEED with what we have */ 2251 diseed = (struct sli4_sge_diseed *) sgl; 2252 
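		/*
		 * The DISEED SGE opens each protection group: it carries the
		 * expected reference tag plus the guard/ref check enables and
		 * the tx/rx BlockGuard opcodes that govern the DIF SGE and the
		 * data SGEs which follow it.
		 */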
memset(diseed, 0, sizeof(struct sli4_sge_diseed)); 2253 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED); 2254 2255 /* Endianness conversion if necessary */ 2256 diseed->ref_tag = cpu_to_le32(reftag); 2257 diseed->ref_tag_tran = diseed->ref_tag; 2258 2259 if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) { 2260 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); 2261 } else { 2262 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); 2263 /* 2264 * When in this mode, the hardware will replace 2265 * the guard tag from the host with a 2266 * newly generated good CRC for the wire. 2267 * Switch to raw mode here to avoid this 2268 * behavior. What the host sends gets put on the wire. 2269 */ 2270 if (txop == BG_OP_IN_CRC_OUT_CRC) { 2271 txop = BG_OP_RAW_MODE; 2272 rxop = BG_OP_RAW_MODE; 2273 } 2274 } 2275 2276 2277 if (sc->prot_flags & SCSI_PROT_REF_CHECK) 2278 bf_set(lpfc_sli4_sge_dif_re, diseed, checking); 2279 else 2280 bf_set(lpfc_sli4_sge_dif_re, diseed, 0); 2281 2282 /* setup DISEED with the rest of the info */ 2283 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); 2284 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); 2285 2286 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); 2287 bf_set(lpfc_sli4_sge_dif_me, diseed, 0); 2288 2289 /* Endianness conversion if necessary for DISEED */ 2290 diseed->word2 = cpu_to_le32(diseed->word2); 2291 diseed->word3 = cpu_to_le32(diseed->word3); 2292 2293 /* advance sgl and increment bde count */ 2294 num_sge++; 2295 2296 sgl++; 2297 j++; 2298 2299 /* setup the first BDE that points to protection buffer */ 2300 protphysaddr = sg_dma_address(sgpe) + protgroup_offset; 2301 protgroup_len = sg_dma_len(sgpe) - protgroup_offset; 2302 2303 /* must be integer multiple of the DIF block length */ 2304 BUG_ON(protgroup_len % 8); 2305 2306 /* Now setup DIF SGE */ 2307 sgl->word2 = 0; 2308 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF); 2309 sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr)); 2310 sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr)); 2311 sgl->word2 = cpu_to_le32(sgl->word2); 2312 sgl->sge_len = 0; 2313 2314 protgrp_blks = protgroup_len / 8; 2315 protgrp_bytes = protgrp_blks * blksize; 2316 2317 /* check if DIF SGE is crossing the 4K boundary; if so split */ 2318 if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) { 2319 protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff); 2320 protgroup_offset += protgroup_remainder; 2321 protgrp_blks = protgroup_remainder / 8; 2322 protgrp_bytes = protgrp_blks * blksize; 2323 } else { 2324 protgroup_offset = 0; 2325 curr_prot++; 2326 } 2327 2328 num_sge++; 2329 2330 /* setup SGE's for data blocks associated with DIF data */ 2331 pgdone = 0; 2332 subtotal = 0; /* total bytes processed for current prot grp */ 2333 2334 sgl++; 2335 j++; 2336 2337 while (!pgdone) { 2338 /* Check to see if we ran out of space */ 2339 if ((num_sge >= phba->cfg_total_seg_cnt) && 2340 !phba->cfg_xpsgl) 2341 return num_sge + 1; 2342 2343 if (!sgde) { 2344 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2345 "9086 BLKGRD:%s Invalid data segment\n", 2346 __func__); 2347 return 0; 2348 } 2349 2350 if (!((j + 1) % phba->border_sge_num)) { 2351 sgl->word2 = 0; 2352 2353 /* set LSP type */ 2354 bf_set(lpfc_sli4_sge_type, sgl, 2355 LPFC_SGE_TYPE_LSP); 2356 2357 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, 2358 lpfc_cmd); 2359 2360 if (unlikely(!sgl_xtra)) { 2361 goto out; 2362 } else { 2363 sgl->addr_lo = cpu_to_le32( 2364 putPaddrLow(sgl_xtra->dma_phys_sgl)); 2365 sgl->addr_hi = cpu_to_le32( 2366 putPaddrHigh(sgl_xtra->dma_phys_sgl)); 2367 } 2368 2369 sgl->word2 = 
cpu_to_le32(sgl->word2); 2370 sgl->sge_len = cpu_to_le32( 2371 phba->cfg_sg_dma_buf_size); 2372 2373 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 2374 } else { 2375 dataphysaddr = sg_dma_address(sgde) + 2376 split_offset; 2377 2378 remainder = sg_dma_len(sgde) - split_offset; 2379 2380 if ((subtotal + remainder) <= protgrp_bytes) { 2381 /* we can use this whole buffer */ 2382 dma_len = remainder; 2383 split_offset = 0; 2384 2385 if ((subtotal + remainder) == 2386 protgrp_bytes) 2387 pgdone = 1; 2388 } else { 2389 /* must split this buffer with next 2390 * prot grp 2391 */ 2392 dma_len = protgrp_bytes - subtotal; 2393 split_offset += dma_len; 2394 } 2395 2396 subtotal += dma_len; 2397 2398 sgl->word2 = 0; 2399 sgl->addr_lo = cpu_to_le32(putPaddrLow( 2400 dataphysaddr)); 2401 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 2402 dataphysaddr)); 2403 bf_set(lpfc_sli4_sge_last, sgl, 0); 2404 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 2405 bf_set(lpfc_sli4_sge_type, sgl, 2406 LPFC_SGE_TYPE_DATA); 2407 2408 sgl->sge_len = cpu_to_le32(dma_len); 2409 dma_offset += dma_len; 2410 2411 num_sge++; 2412 curr_data++; 2413 2414 if (split_offset) { 2415 sgl++; 2416 j++; 2417 break; 2418 } 2419 2420 /* Move to the next s/g segment if possible */ 2421 sgde = sg_next(sgde); 2422 2423 sgl++; 2424 } 2425 2426 j++; 2427 } 2428 2429 if (protgroup_offset) { 2430 /* update the reference tag */ 2431 reftag += protgrp_blks; 2432 continue; 2433 } 2434 2435 /* are we done ? */ 2436 if (curr_prot == protcnt) { 2437 /* mark the last SGL */ 2438 sgl--; 2439 bf_set(lpfc_sli4_sge_last, sgl, 1); 2440 alldone = 1; 2441 } else if (curr_prot < protcnt) { 2442 /* advance to next prot buffer */ 2443 sgpe = sg_next(sgpe); 2444 2445 /* update the reference tag */ 2446 reftag += protgrp_blks; 2447 } else { 2448 /* if we're here, we have a bug */ 2449 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2450 "9085 BLKGRD: bug in %s\n", __func__); 2451 } 2452 2453 } while (!alldone); 2454 2455 out: 2456 2457 return num_sge; 2458 } 2459 2460 /** 2461 * lpfc_prot_group_type - Get prtotection group type of SCSI command 2462 * @phba: The Hba for which this call is being executed. 2463 * @sc: pointer to scsi command we're working on 2464 * 2465 * Given a SCSI command that supports DIF, determine composition of protection 2466 * groups involved in setting up buffer lists 2467 * 2468 * Returns: Protection group type (with or without DIF) 2469 * 2470 **/ 2471 static int 2472 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc) 2473 { 2474 int ret = LPFC_PG_TYPE_INVALID; 2475 unsigned char op = scsi_get_prot_op(sc); 2476 2477 switch (op) { 2478 case SCSI_PROT_READ_STRIP: 2479 case SCSI_PROT_WRITE_INSERT: 2480 ret = LPFC_PG_TYPE_NO_DIF; 2481 break; 2482 case SCSI_PROT_READ_INSERT: 2483 case SCSI_PROT_WRITE_STRIP: 2484 case SCSI_PROT_READ_PASS: 2485 case SCSI_PROT_WRITE_PASS: 2486 ret = LPFC_PG_TYPE_DIF_BUF; 2487 break; 2488 default: 2489 if (phba) 2490 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2491 "9021 Unsupported protection op:%d\n", 2492 op); 2493 break; 2494 } 2495 return ret; 2496 } 2497 2498 /** 2499 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard 2500 * @phba: The Hba for which this call is being executed. 2501 * @lpfc_cmd: The scsi buffer which is going to be adjusted. 2502 * 2503 * Adjust the data length to account for how much data 2504 * is actually on the wire. 
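 * For illustration: when protection data travels on the wire, a
 * 4096-byte transfer with a 512-byte protection interval carries eight
 * 8-byte DIF tuples, so the adjusted length becomes 4096 + 64 = 4160
 * bytes.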
2505 * 2506 * returns the adjusted data length 2507 **/ 2508 static int 2509 lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba, 2510 struct lpfc_io_buf *lpfc_cmd) 2511 { 2512 struct scsi_cmnd *sc = lpfc_cmd->pCmd; 2513 int fcpdl; 2514 2515 fcpdl = scsi_bufflen(sc); 2516 2517 /* Check if there is protection data on the wire */ 2518 if (sc->sc_data_direction == DMA_FROM_DEVICE) { 2519 /* Read check for protection data */ 2520 if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT) 2521 return fcpdl; 2522 2523 } else { 2524 /* Write check for protection data */ 2525 if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP) 2526 return fcpdl; 2527 } 2528 2529 /* 2530 * If we are in DIF Type 1 mode every data block has an 8 byte 2531 * DIF (trailer) attached to it. Must adjust FCP data length 2532 * to account for the protection data. 2533 */ 2534 fcpdl += (fcpdl / scsi_prot_interval(sc)) * 8; 2535 2536 return fcpdl; 2537 } 2538 2539 /** 2540 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec 2541 * @phba: The Hba for which this call is being executed. 2542 * @lpfc_cmd: The scsi buffer which is going to be prep'ed. 2543 * 2544 * This is the protection/DIF aware version of 2545 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the 2546 * two functions eventually, but for now, it's here. 2547 * RETURNS 0 - SUCCESS, 2548 * 1 - Failed DMA map, retry. 2549 * 2 - Invalid scsi cmd or prot-type. Do not retry. 2550 **/ 2551 static int 2552 lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, 2553 struct lpfc_io_buf *lpfc_cmd) 2554 { 2555 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 2556 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 2557 struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl; 2558 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 2559 uint32_t num_bde = 0; 2560 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; 2561 int prot_group_type = 0; 2562 int fcpdl; 2563 int ret = 1; 2564 struct lpfc_vport *vport = phba->pport; 2565 2566 /* 2567 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd 2568 * fcp_rsp regions to the first data bde entry 2569 */ 2570 bpl += 2; 2571 if (scsi_sg_count(scsi_cmnd)) { 2572 /* 2573 * The driver stores the segment count returned from pci_map_sg 2574 * because this is a count of dma-mappings used to map the use_sg 2575 * pages. They are not guaranteed to be the same for those 2576 * architectures that implement an IOMMU.
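 * In other words, datasegcnt below may be smaller than scsi_sg_count()
 * when an IOMMU coalesces entries, and it is this mapped count that
 * drives the BDE setup.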
2577 */ 2578 datasegcnt = dma_map_sg(&phba->pcidev->dev, 2579 scsi_sglist(scsi_cmnd), 2580 scsi_sg_count(scsi_cmnd), datadir); 2581 if (unlikely(!datasegcnt)) 2582 return 1; 2583 2584 lpfc_cmd->seg_cnt = datasegcnt; 2585 2586 /* First check if data segment count from SCSI Layer is good */ 2587 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 2588 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); 2589 ret = 2; 2590 goto err; 2591 } 2592 2593 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); 2594 2595 switch (prot_group_type) { 2596 case LPFC_PG_TYPE_NO_DIF: 2597 2598 /* Here we need to add a PDE5 and PDE6 to the count */ 2599 if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) { 2600 ret = 2; 2601 goto err; 2602 } 2603 2604 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl, 2605 datasegcnt); 2606 /* we should have 2 or more entries in buffer list */ 2607 if (num_bde < 2) { 2608 ret = 2; 2609 goto err; 2610 } 2611 break; 2612 2613 case LPFC_PG_TYPE_DIF_BUF: 2614 /* 2615 * This type indicates that protection buffers are 2616 * passed to the driver, so that needs to be prepared 2617 * for DMA 2618 */ 2619 protsegcnt = dma_map_sg(&phba->pcidev->dev, 2620 scsi_prot_sglist(scsi_cmnd), 2621 scsi_prot_sg_count(scsi_cmnd), datadir); 2622 if (unlikely(!protsegcnt)) { 2623 scsi_dma_unmap(scsi_cmnd); 2624 return 1; 2625 } 2626 2627 lpfc_cmd->prot_seg_cnt = protsegcnt; 2628 2629 /* 2630 * There is a minimun of 4 BPLs used for every 2631 * protection data segment. 2632 */ 2633 if ((lpfc_cmd->prot_seg_cnt * 4) > 2634 (phba->cfg_total_seg_cnt - 2)) { 2635 ret = 2; 2636 goto err; 2637 } 2638 2639 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl, 2640 datasegcnt, protsegcnt); 2641 /* we should have 3 or more entries in buffer list */ 2642 if ((num_bde < 3) || 2643 (num_bde > phba->cfg_total_seg_cnt)) { 2644 ret = 2; 2645 goto err; 2646 } 2647 break; 2648 2649 case LPFC_PG_TYPE_INVALID: 2650 default: 2651 scsi_dma_unmap(scsi_cmnd); 2652 lpfc_cmd->seg_cnt = 0; 2653 2654 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2655 "9022 Unexpected protection group %i\n", 2656 prot_group_type); 2657 return 2; 2658 } 2659 } 2660 2661 /* 2662 * Finish initializing those IOCB fields that are dependent on the 2663 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly 2664 * reinitialized since all iocb memory resources are used many times 2665 * for transmit, receive, and continuation bpl's. 
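 * The BDL size programmed below therefore covers the two fcp_cmnd/
 * fcp_rsp BDEs plus the num_bde protection/data BDEs built above,
 * i.e. (2 + num_bde) * sizeof(struct ulp_bde64).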
2666 */ 2667 iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); 2668 iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64)); 2669 iocb_cmd->ulpBdeCount = 1; 2670 iocb_cmd->ulpLe = 1; 2671 2672 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); 2673 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); 2674 2675 /* 2676 * Due to difference in data length between DIF/non-DIF paths, 2677 * we need to set word 4 of IOCB here 2678 */ 2679 iocb_cmd->un.fcpi.fcpi_parm = fcpdl; 2680 2681 /* 2682 * For First burst, we may need to adjust the initial transfer 2683 * length for DIF 2684 */ 2685 if (iocb_cmd->un.fcpi.fcpi_XRdy && 2686 (fcpdl < vport->cfg_first_burst_size)) 2687 iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl; 2688 2689 return 0; 2690 err: 2691 if (lpfc_cmd->seg_cnt) 2692 scsi_dma_unmap(scsi_cmnd); 2693 if (lpfc_cmd->prot_seg_cnt) 2694 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), 2695 scsi_prot_sg_count(scsi_cmnd), 2696 scsi_cmnd->sc_data_direction); 2697 2698 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2699 "9023 Cannot setup S/G List for HBA" 2700 "IO segs %d/%d BPL %d SCSI %d: %d %d\n", 2701 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, 2702 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, 2703 prot_group_type, num_bde); 2704 2705 lpfc_cmd->seg_cnt = 0; 2706 lpfc_cmd->prot_seg_cnt = 0; 2707 return ret; 2708 } 2709 2710 /* 2711 * This function calculates the T10 DIF guard tag 2712 * on the specified data using a CRC algorithm 2713 * via crc_t10dif. 2714 */ 2715 static uint16_t 2716 lpfc_bg_crc(uint8_t *data, int count) 2717 { 2718 uint16_t crc = 0; 2719 uint16_t x; 2720 2721 crc = crc_t10dif(data, count); 2722 x = cpu_to_be16(crc); 2723 return x; 2724 } 2725 2726 /* 2727 * This function calculates the T10 DIF guard tag 2728 * on the specified data using a CSUM algorithm 2729 * via ip_compute_csum. 2730 */ 2731 static uint16_t 2732 lpfc_bg_csum(uint8_t *data, int count) 2733 { 2734 uint16_t ret; 2735 2736 ret = ip_compute_csum(data, count); 2737 return ret; 2738 } 2739 2740 /* 2741 * This function examines the protection data to try to determine 2742 * what type of T10-DIF error occurred. 2743 */ 2744 static void 2745 lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) 2746 { 2747 struct scatterlist *sgpe; /* s/g prot entry */ 2748 struct scatterlist *sgde; /* s/g data entry */ 2749 struct scsi_cmnd *cmd = lpfc_cmd->pCmd; 2750 struct scsi_dif_tuple *src = NULL; 2751 uint8_t *data_src = NULL; 2752 uint16_t guard_tag; 2753 uint16_t start_app_tag, app_tag; 2754 uint32_t start_ref_tag, ref_tag; 2755 int prot, protsegcnt; 2756 int err_type, len, data_len; 2757 int chk_ref, chk_app, chk_guard; 2758 uint16_t sum; 2759 unsigned blksize; 2760 2761 err_type = BGS_GUARD_ERR_MASK; 2762 sum = 0; 2763 guard_tag = 0; 2764 2765 /* First check to see if there is protection data to examine */ 2766 prot = scsi_get_prot_op(cmd); 2767 if ((prot == SCSI_PROT_READ_STRIP) || 2768 (prot == SCSI_PROT_WRITE_INSERT) || 2769 (prot == SCSI_PROT_NORMAL)) 2770 goto out; 2771 2772 /* Currently the driver just supports ref_tag and guard_tag checking */ 2773 chk_ref = 1; 2774 chk_app = 0; 2775 chk_guard = 0; 2776 2777 /* Setup a ptr to the protection data provided by the SCSI host */ 2778 sgpe = scsi_prot_sglist(cmd); 2779 protsegcnt = lpfc_cmd->prot_seg_cnt; 2780 2781 if (sgpe && protsegcnt) { 2782 2783 /* 2784 * We will only try to verify guard tag if the segment 2785 * data length is a multiple of the blksize.
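 * For each such block the guard tag is recomputed over blksize bytes
 * of data, using lpfc_bg_csum() when SCSI_PROT_IP_CHECKSUM is set or
 * lpfc_bg_crc() otherwise, and compared against the guard_tag field of
 * the matching 8-byte tuple.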
2786 */ 2787 sgde = scsi_sglist(cmd); 2788 blksize = scsi_prot_interval(cmd); 2789 data_src = (uint8_t *)sg_virt(sgde); 2790 data_len = sgde->length; 2791 if ((data_len & (blksize - 1)) == 0) 2792 chk_guard = 1; 2793 2794 src = (struct scsi_dif_tuple *)sg_virt(sgpe); 2795 start_ref_tag = scsi_prot_ref_tag(cmd); 2796 if (start_ref_tag == LPFC_INVALID_REFTAG) 2797 goto out; 2798 start_app_tag = src->app_tag; 2799 len = sgpe->length; 2800 while (src && protsegcnt) { 2801 while (len) { 2802 2803 /* 2804 * First check to see if a protection data 2805 * check is valid 2806 */ 2807 if ((src->ref_tag == T10_PI_REF_ESCAPE) || 2808 (src->app_tag == T10_PI_APP_ESCAPE)) { 2809 start_ref_tag++; 2810 goto skipit; 2811 } 2812 2813 /* First Guard Tag checking */ 2814 if (chk_guard) { 2815 guard_tag = src->guard_tag; 2816 if (cmd->prot_flags 2817 & SCSI_PROT_IP_CHECKSUM) 2818 sum = lpfc_bg_csum(data_src, 2819 blksize); 2820 else 2821 sum = lpfc_bg_crc(data_src, 2822 blksize); 2823 if ((guard_tag != sum)) { 2824 err_type = BGS_GUARD_ERR_MASK; 2825 goto out; 2826 } 2827 } 2828 2829 /* Reference Tag checking */ 2830 ref_tag = be32_to_cpu(src->ref_tag); 2831 if (chk_ref && (ref_tag != start_ref_tag)) { 2832 err_type = BGS_REFTAG_ERR_MASK; 2833 goto out; 2834 } 2835 start_ref_tag++; 2836 2837 /* App Tag checking */ 2838 app_tag = src->app_tag; 2839 if (chk_app && (app_tag != start_app_tag)) { 2840 err_type = BGS_APPTAG_ERR_MASK; 2841 goto out; 2842 } 2843 skipit: 2844 len -= sizeof(struct scsi_dif_tuple); 2845 if (len < 0) 2846 len = 0; 2847 src++; 2848 2849 data_src += blksize; 2850 data_len -= blksize; 2851 2852 /* 2853 * Are we at the end of the Data segment? 2854 * The data segment is only used for Guard 2855 * tag checking. 2856 */ 2857 if (chk_guard && (data_len == 0)) { 2858 chk_guard = 0; 2859 sgde = sg_next(sgde); 2860 if (!sgde) 2861 goto out; 2862 2863 data_src = (uint8_t *)sg_virt(sgde); 2864 data_len = sgde->length; 2865 if ((data_len & (blksize - 1)) == 0) 2866 chk_guard = 1; 2867 } 2868 } 2869 2870 /* Goto the next Protection data segment */ 2871 sgpe = sg_next(sgpe); 2872 if (sgpe) { 2873 src = (struct scsi_dif_tuple *)sg_virt(sgpe); 2874 len = sgpe->length; 2875 } else { 2876 src = NULL; 2877 } 2878 protsegcnt--; 2879 } 2880 } 2881 out: 2882 if (err_type == BGS_GUARD_ERR_MASK) { 2883 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1); 2884 set_host_byte(cmd, DID_ABORT); 2885 phba->bg_guard_err_cnt++; 2886 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2887 "9069 BLKGRD: reftag %x grd_tag err %x != %x\n", 2888 scsi_prot_ref_tag(cmd), 2889 sum, guard_tag); 2890 2891 } else if (err_type == BGS_REFTAG_ERR_MASK) { 2892 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3); 2893 set_host_byte(cmd, DID_ABORT); 2894 2895 phba->bg_reftag_err_cnt++; 2896 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2897 "9066 BLKGRD: reftag %x ref_tag err %x != %x\n", 2898 scsi_prot_ref_tag(cmd), 2899 ref_tag, start_ref_tag); 2900 2901 } else if (err_type == BGS_APPTAG_ERR_MASK) { 2902 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2); 2903 set_host_byte(cmd, DID_ABORT); 2904 2905 phba->bg_apptag_err_cnt++; 2906 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2907 "9041 BLKGRD: reftag %x app_tag err %x != %x\n", 2908 scsi_prot_ref_tag(cmd), 2909 app_tag, start_app_tag); 2910 } 2911 } 2912 2913 /* 2914 * This function checks for BlockGuard errors detected by 2915 * the HBA. 
In case of errors, the ASC/ASCQ fields in the 2916 * sense buffer will be set accordingly, paired with 2917 * ILLEGAL_REQUEST to signal to the kernel that the HBA 2918 * detected corruption. 2919 * 2920 * Returns: 2921 * 0 - No error found 2922 * 1 - BlockGuard error found 2923 * -1 - Internal error (bad profile, ...etc) 2924 */ 2925 static int 2926 lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, 2927 struct lpfc_wcqe_complete *wcqe) 2928 { 2929 struct scsi_cmnd *cmd = lpfc_cmd->pCmd; 2930 int ret = 0; 2931 u32 status = bf_get(lpfc_wcqe_c_status, wcqe); 2932 u32 bghm = 0; 2933 u32 bgstat = 0; 2934 u64 failing_sector = 0; 2935 2936 if (status == CQE_STATUS_DI_ERROR) { 2937 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */ 2938 bgstat |= BGS_GUARD_ERR_MASK; 2939 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* AppTag Check failed */ 2940 bgstat |= BGS_APPTAG_ERR_MASK; 2941 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* RefTag Check failed */ 2942 bgstat |= BGS_REFTAG_ERR_MASK; 2943 2944 /* Check to see if there was any good data before the error */ 2945 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) { 2946 bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK; 2947 bghm = wcqe->total_data_placed; 2948 } 2949 2950 /* 2951 * Set ALL the error bits to indicate we don't know what 2952 * type of error it is. 2953 */ 2954 if (!bgstat) 2955 bgstat |= (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK | 2956 BGS_GUARD_ERR_MASK); 2957 } 2958 2959 if (lpfc_bgs_get_guard_err(bgstat)) { 2960 ret = 1; 2961 2962 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1); 2963 set_host_byte(cmd, DID_ABORT); 2964 phba->bg_guard_err_cnt++; 2965 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2966 "9059 BLKGRD: Guard Tag error in cmd" 2967 " 0x%x lba 0x%llx blk cnt 0x%x " 2968 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 2969 (unsigned long long)scsi_get_lba(cmd), 2970 scsi_logical_block_count(cmd), bgstat, bghm); 2971 } 2972 2973 if (lpfc_bgs_get_reftag_err(bgstat)) { 2974 ret = 1; 2975 2976 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3); 2977 set_host_byte(cmd, DID_ABORT); 2978 2979 phba->bg_reftag_err_cnt++; 2980 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2981 "9060 BLKGRD: Ref Tag error in cmd" 2982 " 0x%x lba 0x%llx blk cnt 0x%x " 2983 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 2984 (unsigned long long)scsi_get_lba(cmd), 2985 scsi_logical_block_count(cmd), bgstat, bghm); 2986 } 2987 2988 if (lpfc_bgs_get_apptag_err(bgstat)) { 2989 ret = 1; 2990 2991 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2); 2992 set_host_byte(cmd, DID_ABORT); 2993 2994 phba->bg_apptag_err_cnt++; 2995 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2996 "9062 BLKGRD: App Tag error in cmd" 2997 " 0x%x lba 0x%llx blk cnt 0x%x " 2998 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 2999 (unsigned long long)scsi_get_lba(cmd), 3000 scsi_logical_block_count(cmd), bgstat, bghm); 3001 } 3002 3003 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { 3004 /* 3005 * setup sense data descriptor 0 per SPC-4 as an information 3006 * field, and put the failing LBA in it. 3007 * This code assumes there was also a guard/app/ref tag error 3008 * indication. 
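 * Bytes written below: sense_buffer[7] = additional sense length (0xc),
 * [8] = descriptor type 0 (Information), [9] = additional descriptor
 * length (0xa), [10] = VALID bit (0x80), [12..19] = failing LBA in
 * big-endian form.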
3009 */ 3010 cmd->sense_buffer[7] = 0xc; /* Additional sense length */ 3011 cmd->sense_buffer[8] = 0; /* Information descriptor type */ 3012 cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */ 3013 cmd->sense_buffer[10] = 0x80; /* Validity bit */ 3014 3015 /* bghm is a "on the wire" FC frame based count */ 3016 switch (scsi_get_prot_op(cmd)) { 3017 case SCSI_PROT_READ_INSERT: 3018 case SCSI_PROT_WRITE_STRIP: 3019 bghm /= cmd->device->sector_size; 3020 break; 3021 case SCSI_PROT_READ_STRIP: 3022 case SCSI_PROT_WRITE_INSERT: 3023 case SCSI_PROT_READ_PASS: 3024 case SCSI_PROT_WRITE_PASS: 3025 bghm /= (cmd->device->sector_size + 3026 sizeof(struct scsi_dif_tuple)); 3027 break; 3028 } 3029 3030 failing_sector = scsi_get_lba(cmd); 3031 failing_sector += bghm; 3032 3033 /* Descriptor Information */ 3034 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]); 3035 } 3036 3037 if (!ret) { 3038 /* No error was reported - problem in FW? */ 3039 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3040 "9068 BLKGRD: Unknown error in cmd" 3041 " 0x%x lba 0x%llx blk cnt 0x%x " 3042 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3043 (unsigned long long)scsi_get_lba(cmd), 3044 scsi_logical_block_count(cmd), bgstat, bghm); 3045 3046 /* Calculate what type of error it was */ 3047 lpfc_calc_bg_err(phba, lpfc_cmd); 3048 } 3049 return ret; 3050 } 3051 3052 /* 3053 * This function checks for BlockGuard errors detected by 3054 * the HBA. In case of errors, the ASC/ASCQ fields in the 3055 * sense buffer will be set accordingly, paired with 3056 * ILLEGAL_REQUEST to signal to the kernel that the HBA 3057 * detected corruption. 3058 * 3059 * Returns: 3060 * 0 - No error found 3061 * 1 - BlockGuard error found 3062 * -1 - Internal error (bad profile, ...etc) 3063 */ 3064 static int 3065 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, 3066 struct lpfc_iocbq *pIocbOut) 3067 { 3068 struct scsi_cmnd *cmd = lpfc_cmd->pCmd; 3069 struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg; 3070 int ret = 0; 3071 uint32_t bghm = bgf->bghm; 3072 uint32_t bgstat = bgf->bgstat; 3073 uint64_t failing_sector = 0; 3074 3075 if (lpfc_bgs_get_invalid_prof(bgstat)) { 3076 cmd->result = DID_ERROR << 16; 3077 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3078 "9072 BLKGRD: Invalid BG Profile in cmd " 3079 "0x%x reftag 0x%x blk cnt 0x%x " 3080 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3081 scsi_prot_ref_tag(cmd), 3082 scsi_logical_block_count(cmd), bgstat, bghm); 3083 ret = (-1); 3084 goto out; 3085 } 3086 3087 if (lpfc_bgs_get_uninit_dif_block(bgstat)) { 3088 cmd->result = DID_ERROR << 16; 3089 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3090 "9073 BLKGRD: Invalid BG PDIF Block in cmd " 3091 "0x%x reftag 0x%x blk cnt 0x%x " 3092 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3093 scsi_prot_ref_tag(cmd), 3094 scsi_logical_block_count(cmd), bgstat, bghm); 3095 ret = (-1); 3096 goto out; 3097 } 3098 3099 if (lpfc_bgs_get_guard_err(bgstat)) { 3100 ret = 1; 3101 3102 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1); 3103 set_host_byte(cmd, DID_ABORT); 3104 phba->bg_guard_err_cnt++; 3105 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3106 "9055 BLKGRD: Guard Tag error in cmd " 3107 "0x%x reftag 0x%x blk cnt 0x%x " 3108 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3109 scsi_prot_ref_tag(cmd), 3110 scsi_logical_block_count(cmd), bgstat, bghm); 3111 } 3112 3113 if (lpfc_bgs_get_reftag_err(bgstat)) { 3114 ret = 1; 3115 3116 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3); 3117 set_host_byte(cmd, 
DID_ABORT); 3118 3119 phba->bg_reftag_err_cnt++; 3120 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3121 "9056 BLKGRD: Ref Tag error in cmd " 3122 "0x%x reftag 0x%x blk cnt 0x%x " 3123 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3124 scsi_prot_ref_tag(cmd), 3125 scsi_logical_block_count(cmd), bgstat, bghm); 3126 } 3127 3128 if (lpfc_bgs_get_apptag_err(bgstat)) { 3129 ret = 1; 3130 3131 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2); 3132 set_host_byte(cmd, DID_ABORT); 3133 3134 phba->bg_apptag_err_cnt++; 3135 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3136 "9061 BLKGRD: App Tag error in cmd " 3137 "0x%x reftag 0x%x blk cnt 0x%x " 3138 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3139 scsi_prot_ref_tag(cmd), 3140 scsi_logical_block_count(cmd), bgstat, bghm); 3141 } 3142 3143 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { 3144 /* 3145 * setup sense data descriptor 0 per SPC-4 as an information 3146 * field, and put the failing LBA in it. 3147 * This code assumes there was also a guard/app/ref tag error 3148 * indication. 3149 */ 3150 cmd->sense_buffer[7] = 0xc; /* Additional sense length */ 3151 cmd->sense_buffer[8] = 0; /* Information descriptor type */ 3152 cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */ 3153 cmd->sense_buffer[10] = 0x80; /* Validity bit */ 3154 3155 /* bghm is a "on the wire" FC frame based count */ 3156 switch (scsi_get_prot_op(cmd)) { 3157 case SCSI_PROT_READ_INSERT: 3158 case SCSI_PROT_WRITE_STRIP: 3159 bghm /= cmd->device->sector_size; 3160 break; 3161 case SCSI_PROT_READ_STRIP: 3162 case SCSI_PROT_WRITE_INSERT: 3163 case SCSI_PROT_READ_PASS: 3164 case SCSI_PROT_WRITE_PASS: 3165 bghm /= (cmd->device->sector_size + 3166 sizeof(struct scsi_dif_tuple)); 3167 break; 3168 } 3169 3170 failing_sector = scsi_get_lba(cmd); 3171 failing_sector += bghm; 3172 3173 /* Descriptor Information */ 3174 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]); 3175 } 3176 3177 if (!ret) { 3178 /* No error was reported - problem in FW? */ 3179 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3180 "9057 BLKGRD: Unknown error in cmd " 3181 "0x%x reftag 0x%x blk cnt 0x%x " 3182 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3183 scsi_prot_ref_tag(cmd), 3184 scsi_logical_block_count(cmd), bgstat, bghm); 3185 3186 /* Calculate what type of error it was */ 3187 lpfc_calc_bg_err(phba, lpfc_cmd); 3188 } 3189 out: 3190 return ret; 3191 } 3192 3193 /** 3194 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec 3195 * @phba: The Hba for which this call is being executed. 3196 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3197 * 3198 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd 3199 * field of @lpfc_cmd for device with SLI-4 interface spec. 
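 * Resulting SGL layout (sketch): SGE 0 maps fcp_cmnd, SGE 1 maps
 * fcp_rsp (its "last" bit is cleared when data follows), and SGE 2
 * onwards carry the data segments, with the "last" bit set on the
 * final one.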
3200 * 3201 * Return codes: 3202 * 2 - Error - Do not retry 3203 * 1 - Error - Retry 3204 * 0 - Success 3205 **/ 3206 static int 3207 lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) 3208 { 3209 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 3210 struct scatterlist *sgel = NULL; 3211 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 3212 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; 3213 struct sli4_sge *first_data_sgl; 3214 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; 3215 struct lpfc_vport *vport = phba->pport; 3216 union lpfc_wqe128 *wqe = &pwqeq->wqe; 3217 dma_addr_t physaddr; 3218 uint32_t num_bde = 0; 3219 uint32_t dma_len; 3220 uint32_t dma_offset = 0; 3221 int nseg, i, j; 3222 struct ulp_bde64 *bde; 3223 bool lsp_just_set = false; 3224 struct sli4_hybrid_sgl *sgl_xtra = NULL; 3225 3226 /* 3227 * There are three possibilities here - use scatter-gather segment, use 3228 * the single mapping, or neither. Start the lpfc command prep by 3229 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first 3230 * data bde entry. 3231 */ 3232 if (scsi_sg_count(scsi_cmnd)) { 3233 /* 3234 * The driver stores the segment count returned from pci_map_sg 3235 * because this a count of dma-mappings used to map the use_sg 3236 * pages. They are not guaranteed to be the same for those 3237 * architectures that implement an IOMMU. 3238 */ 3239 3240 nseg = scsi_dma_map(scsi_cmnd); 3241 if (unlikely(nseg <= 0)) 3242 return 1; 3243 sgl += 1; 3244 /* clear the last flag in the fcp_rsp map entry */ 3245 sgl->word2 = le32_to_cpu(sgl->word2); 3246 bf_set(lpfc_sli4_sge_last, sgl, 0); 3247 sgl->word2 = cpu_to_le32(sgl->word2); 3248 sgl += 1; 3249 first_data_sgl = sgl; 3250 lpfc_cmd->seg_cnt = nseg; 3251 if (!phba->cfg_xpsgl && 3252 lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 3253 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3254 "9074 BLKGRD:" 3255 " %s: Too many sg segments from " 3256 "dma_map_sg. Config %d, seg_cnt %d\n", 3257 __func__, phba->cfg_sg_seg_cnt, 3258 lpfc_cmd->seg_cnt); 3259 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); 3260 lpfc_cmd->seg_cnt = 0; 3261 scsi_dma_unmap(scsi_cmnd); 3262 return 2; 3263 } 3264 3265 /* 3266 * The driver established a maximum scatter-gather segment count 3267 * during probe that limits the number of sg elements in any 3268 * single scsi command. Just run through the seg_cnt and format 3269 * the sge's. 3270 * When using SLI-3 the driver will try to fit all the BDEs into 3271 * the IOCB. If it can't then the BDEs get added to a BPL as it 3272 * does for SLI-2 mode. 
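 * Under SLI-4, when more SGEs are needed than fit in one SGL page,
 * an LSP (list segment pointer) type SGE is written instead and the
 * chain continues in an extra per-hardware-queue SGL obtained from
 * lpfc_get_sgl_per_hdwq().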
3273 */ 3274 3275 /* for tracking segment boundaries */ 3276 sgel = scsi_sglist(scsi_cmnd); 3277 j = 2; 3278 for (i = 0; i < nseg; i++) { 3279 sgl->word2 = 0; 3280 if ((num_bde + 1) == nseg) { 3281 bf_set(lpfc_sli4_sge_last, sgl, 1); 3282 bf_set(lpfc_sli4_sge_type, sgl, 3283 LPFC_SGE_TYPE_DATA); 3284 } else { 3285 bf_set(lpfc_sli4_sge_last, sgl, 0); 3286 3287 /* do we need to expand the segment */ 3288 if (!lsp_just_set && 3289 !((j + 1) % phba->border_sge_num) && 3290 ((nseg - 1) != i)) { 3291 /* set LSP type */ 3292 bf_set(lpfc_sli4_sge_type, sgl, 3293 LPFC_SGE_TYPE_LSP); 3294 3295 sgl_xtra = lpfc_get_sgl_per_hdwq( 3296 phba, lpfc_cmd); 3297 3298 if (unlikely(!sgl_xtra)) { 3299 lpfc_cmd->seg_cnt = 0; 3300 scsi_dma_unmap(scsi_cmnd); 3301 return 1; 3302 } 3303 sgl->addr_lo = cpu_to_le32(putPaddrLow( 3304 sgl_xtra->dma_phys_sgl)); 3305 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 3306 sgl_xtra->dma_phys_sgl)); 3307 3308 } else { 3309 bf_set(lpfc_sli4_sge_type, sgl, 3310 LPFC_SGE_TYPE_DATA); 3311 } 3312 } 3313 3314 if (!(bf_get(lpfc_sli4_sge_type, sgl) & 3315 LPFC_SGE_TYPE_LSP)) { 3316 if ((nseg - 1) == i) 3317 bf_set(lpfc_sli4_sge_last, sgl, 1); 3318 3319 physaddr = sg_dma_address(sgel); 3320 dma_len = sg_dma_len(sgel); 3321 sgl->addr_lo = cpu_to_le32(putPaddrLow( 3322 physaddr)); 3323 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 3324 physaddr)); 3325 3326 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 3327 sgl->word2 = cpu_to_le32(sgl->word2); 3328 sgl->sge_len = cpu_to_le32(dma_len); 3329 3330 dma_offset += dma_len; 3331 sgel = sg_next(sgel); 3332 3333 sgl++; 3334 lsp_just_set = false; 3335 3336 } else { 3337 sgl->word2 = cpu_to_le32(sgl->word2); 3338 sgl->sge_len = cpu_to_le32( 3339 phba->cfg_sg_dma_buf_size); 3340 3341 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 3342 i = i - 1; 3343 3344 lsp_just_set = true; 3345 } 3346 3347 j++; 3348 } 3349 /* 3350 * Setup the first Payload BDE. For FCoE we just key off 3351 * Performance Hints, for FC we use lpfc_enable_pbde. 3352 * We populate words 13-15 of IOCB/WQE. 3353 */ 3354 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) || 3355 phba->cfg_enable_pbde) { 3356 bde = (struct ulp_bde64 *) 3357 &wqe->words[13]; 3358 bde->addrLow = first_data_sgl->addr_lo; 3359 bde->addrHigh = first_data_sgl->addr_hi; 3360 bde->tus.f.bdeSize = 3361 le32_to_cpu(first_data_sgl->sge_len); 3362 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 3363 bde->tus.w = cpu_to_le32(bde->tus.w); 3364 3365 } else { 3366 memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3)); 3367 } 3368 } else { 3369 sgl += 1; 3370 /* clear the last flag in the fcp_rsp map entry */ 3371 sgl->word2 = le32_to_cpu(sgl->word2); 3372 bf_set(lpfc_sli4_sge_last, sgl, 1); 3373 sgl->word2 = cpu_to_le32(sgl->word2); 3374 3375 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) || 3376 phba->cfg_enable_pbde) { 3377 bde = (struct ulp_bde64 *) 3378 &wqe->words[13]; 3379 memset(bde, 0, (sizeof(uint32_t) * 3)); 3380 } 3381 } 3382 3383 /* Word 11 */ 3384 if (phba->cfg_enable_pbde) 3385 bf_set(wqe_pbde, &wqe->generic.wqe_com, 1); 3386 3387 /* 3388 * Finish initializing those IOCB fields that are dependent on the 3389 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is 3390 * explicitly reinitialized. 3391 * all iocb memory resources are reused. 
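 * fcpDl below is the total transfer length (scsi_bufflen) in
 * big-endian form. For a write on a native FC link with first burst
 * negotiated, e.g. cfg_first_burst_size = 65536 (an illustrative
 * value) and a 1 MiB transfer, WQE words 4/5 become 65536 and 1048576;
 * otherwise only the total transfer length is programmed.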
3392 */ 3393 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); 3394 /* Set first-burst provided it was successfully negotiated */ 3395 if (!(phba->hba_flag & HBA_FCOE_MODE) && 3396 vport->cfg_first_burst_size && 3397 scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) { 3398 u32 init_len, total_len; 3399 3400 total_len = be32_to_cpu(fcp_cmnd->fcpDl); 3401 init_len = min(total_len, vport->cfg_first_burst_size); 3402 3403 /* Word 4 & 5 */ 3404 wqe->fcp_iwrite.initial_xfer_len = init_len; 3405 wqe->fcp_iwrite.total_xfer_len = total_len; 3406 } else { 3407 /* Word 4 */ 3408 wqe->fcp_iwrite.total_xfer_len = 3409 be32_to_cpu(fcp_cmnd->fcpDl); 3410 } 3411 3412 /* 3413 * If the OAS driver feature is enabled and the lun is enabled for 3414 * OAS, set the oas iocb related flags. 3415 */ 3416 if ((phba->cfg_fof) && ((struct lpfc_device_data *) 3417 scsi_cmnd->device->hostdata)->oas_enabled) { 3418 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF); 3419 lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *) 3420 scsi_cmnd->device->hostdata)->priority; 3421 3422 /* Word 10 */ 3423 bf_set(wqe_oas, &wqe->generic.wqe_com, 1); 3424 bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1); 3425 3426 if (lpfc_cmd->cur_iocbq.priority) 3427 bf_set(wqe_ccp, &wqe->generic.wqe_com, 3428 (lpfc_cmd->cur_iocbq.priority << 1)); 3429 else 3430 bf_set(wqe_ccp, &wqe->generic.wqe_com, 3431 (phba->cfg_XLanePriority << 1)); 3432 } 3433 3434 return 0; 3435 } 3436 3437 /** 3438 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec 3439 * @phba: The Hba for which this call is being executed. 3440 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3441 * 3442 * This is the protection/DIF aware version of 3443 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the 3444 * two functions eventually, but for now, it's here 3445 * Return codes: 3446 * 2 - Error - Do not retry 3447 * 1 - Error - Retry 3448 * 0 - Success 3449 **/ 3450 static int 3451 lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, 3452 struct lpfc_io_buf *lpfc_cmd) 3453 { 3454 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 3455 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 3456 struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl); 3457 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; 3458 union lpfc_wqe128 *wqe = &pwqeq->wqe; 3459 uint32_t num_sge = 0; 3460 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; 3461 int prot_group_type = 0; 3462 int fcpdl; 3463 int ret = 1; 3464 struct lpfc_vport *vport = phba->pport; 3465 3466 /* 3467 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd 3468 * fcp_rsp regions to the first data sge entry 3469 */ 3470 if (scsi_sg_count(scsi_cmnd)) { 3471 /* 3472 * The driver stores the segment count returned from pci_map_sg 3473 * because this a count of dma-mappings used to map the use_sg 3474 * pages. They are not guaranteed to be the same for those 3475 * architectures that implement an IOMMU. 
3476 */ 3477 datasegcnt = dma_map_sg(&phba->pcidev->dev, 3478 scsi_sglist(scsi_cmnd), 3479 scsi_sg_count(scsi_cmnd), datadir); 3480 if (unlikely(!datasegcnt)) 3481 return 1; 3482 3483 sgl += 1; 3484 /* clear the last flag in the fcp_rsp map entry */ 3485 sgl->word2 = le32_to_cpu(sgl->word2); 3486 bf_set(lpfc_sli4_sge_last, sgl, 0); 3487 sgl->word2 = cpu_to_le32(sgl->word2); 3488 3489 sgl += 1; 3490 lpfc_cmd->seg_cnt = datasegcnt; 3491 3492 /* First check if data segment count from SCSI Layer is good */ 3493 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt && 3494 !phba->cfg_xpsgl) { 3495 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); 3496 ret = 2; 3497 goto err; 3498 } 3499 3500 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); 3501 3502 switch (prot_group_type) { 3503 case LPFC_PG_TYPE_NO_DIF: 3504 /* Here we need to add a DISEED to the count */ 3505 if (((lpfc_cmd->seg_cnt + 1) > 3506 phba->cfg_total_seg_cnt) && 3507 !phba->cfg_xpsgl) { 3508 ret = 2; 3509 goto err; 3510 } 3511 3512 num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl, 3513 datasegcnt, lpfc_cmd); 3514 3515 /* we should have 2 or more entries in buffer list */ 3516 if (num_sge < 2) { 3517 ret = 2; 3518 goto err; 3519 } 3520 break; 3521 3522 case LPFC_PG_TYPE_DIF_BUF: 3523 /* 3524 * This type indicates that protection buffers are 3525 * passed to the driver, so that needs to be prepared 3526 * for DMA 3527 */ 3528 protsegcnt = dma_map_sg(&phba->pcidev->dev, 3529 scsi_prot_sglist(scsi_cmnd), 3530 scsi_prot_sg_count(scsi_cmnd), datadir); 3531 if (unlikely(!protsegcnt)) { 3532 scsi_dma_unmap(scsi_cmnd); 3533 return 1; 3534 } 3535 3536 lpfc_cmd->prot_seg_cnt = protsegcnt; 3537 /* 3538 * There is a minimun of 3 SGEs used for every 3539 * protection data segment. 3540 */ 3541 if (((lpfc_cmd->prot_seg_cnt * 3) > 3542 (phba->cfg_total_seg_cnt - 2)) && 3543 !phba->cfg_xpsgl) { 3544 ret = 2; 3545 goto err; 3546 } 3547 3548 num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl, 3549 datasegcnt, protsegcnt, lpfc_cmd); 3550 3551 /* we should have 3 or more entries in buffer list */ 3552 if (num_sge < 3 || 3553 (num_sge > phba->cfg_total_seg_cnt && 3554 !phba->cfg_xpsgl)) { 3555 ret = 2; 3556 goto err; 3557 } 3558 break; 3559 3560 case LPFC_PG_TYPE_INVALID: 3561 default: 3562 scsi_dma_unmap(scsi_cmnd); 3563 lpfc_cmd->seg_cnt = 0; 3564 3565 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3566 "9083 Unexpected protection group %i\n", 3567 prot_group_type); 3568 return 2; 3569 } 3570 } 3571 3572 switch (scsi_get_prot_op(scsi_cmnd)) { 3573 case SCSI_PROT_WRITE_STRIP: 3574 case SCSI_PROT_READ_STRIP: 3575 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP; 3576 break; 3577 case SCSI_PROT_WRITE_INSERT: 3578 case SCSI_PROT_READ_INSERT: 3579 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT; 3580 break; 3581 case SCSI_PROT_WRITE_PASS: 3582 case SCSI_PROT_READ_PASS: 3583 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS; 3584 break; 3585 } 3586 3587 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); 3588 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); 3589 3590 /* Set first-burst provided it was successfully negotiated */ 3591 if (!(phba->hba_flag & HBA_FCOE_MODE) && 3592 vport->cfg_first_burst_size && 3593 scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) { 3594 u32 init_len, total_len; 3595 3596 total_len = be32_to_cpu(fcp_cmnd->fcpDl); 3597 init_len = min(total_len, vport->cfg_first_burst_size); 3598 3599 /* Word 4 & 5 */ 3600 wqe->fcp_iwrite.initial_xfer_len = init_len; 3601 wqe->fcp_iwrite.total_xfer_len = total_len; 3602 } else { 3603 /* Word 4 
*/ 3604 wqe->fcp_iwrite.total_xfer_len = 3605 be32_to_cpu(fcp_cmnd->fcpDl); 3606 } 3607 3608 /* 3609 * If the OAS driver feature is enabled and the lun is enabled for 3610 * OAS, set the oas iocb related flags. 3611 */ 3612 if ((phba->cfg_fof) && ((struct lpfc_device_data *) 3613 scsi_cmnd->device->hostdata)->oas_enabled) { 3614 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF); 3615 3616 /* Word 10 */ 3617 bf_set(wqe_oas, &wqe->generic.wqe_com, 1); 3618 bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1); 3619 bf_set(wqe_ccp, &wqe->generic.wqe_com, 3620 (phba->cfg_XLanePriority << 1)); 3621 } 3622 3623 /* Word 7. DIF Flags */ 3624 if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_PASS) 3625 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU); 3626 else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_STRIP) 3627 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP); 3628 else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_INSERT) 3629 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT); 3630 3631 lpfc_cmd->cur_iocbq.iocb_flag &= ~(LPFC_IO_DIF_PASS | 3632 LPFC_IO_DIF_STRIP | LPFC_IO_DIF_INSERT); 3633 3634 return 0; 3635 err: 3636 if (lpfc_cmd->seg_cnt) 3637 scsi_dma_unmap(scsi_cmnd); 3638 if (lpfc_cmd->prot_seg_cnt) 3639 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), 3640 scsi_prot_sg_count(scsi_cmnd), 3641 scsi_cmnd->sc_data_direction); 3642 3643 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3644 "9084 Cannot setup S/G List for HBA" 3645 "IO segs %d/%d SGL %d SCSI %d: %d %d\n", 3646 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, 3647 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, 3648 prot_group_type, num_sge); 3649 3650 lpfc_cmd->seg_cnt = 0; 3651 lpfc_cmd->prot_seg_cnt = 0; 3652 return ret; 3653 } 3654 3655 /** 3656 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer 3657 * @phba: The Hba for which this call is being executed. 3658 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3659 * 3660 * This routine wraps the actual DMA mapping function pointer from the 3661 * lpfc_hba struct. 3662 * 3663 * Return codes: 3664 * 1 - Error 3665 * 0 - Success 3666 **/ 3667 static inline int 3668 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) 3669 { 3670 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); 3671 } 3672 3673 /** 3674 * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer 3675 * using BlockGuard. 3676 * @phba: The Hba for which this call is being executed. 3677 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3678 * 3679 * This routine wraps the actual DMA mapping function pointer from the 3680 * lpfc_hba struct. 3681 * 3682 * Return codes: 3683 * 1 - Error 3684 * 0 - Success 3685 **/ 3686 static inline int 3687 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) 3688 { 3689 return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 3690 } 3691 3692 /** 3693 * lpfc_scsi_prep_cmnd_buf - Wrapper function for IOCB/WQE mapping of scsi 3694 * buffer 3695 * @vport: Pointer to vport object. 3696 * @lpfc_cmd: The scsi buffer which is going to be mapped. 
3697 * @tmo: Timeout value for IO 3698 * 3699 * This routine initializes IOCB/WQE data structure from scsi command 3700 * 3701 * Return codes: 3702 * 1 - Error 3703 * 0 - Success 3704 **/ 3705 static inline int 3706 lpfc_scsi_prep_cmnd_buf(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, 3707 uint8_t tmo) 3708 { 3709 return vport->phba->lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, tmo); 3710 } 3711 3712 /** 3713 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error 3714 * @phba: Pointer to hba context object. 3715 * @vport: Pointer to vport object. 3716 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error. 3717 * @fcpi_parm: FCP Initiator parameter. 3718 * 3719 * This function posts an event when there is a SCSI command reporting 3720 * error from the scsi device. 3721 **/ 3722 static void 3723 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport, 3724 struct lpfc_io_buf *lpfc_cmd, uint32_t fcpi_parm) { 3725 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; 3726 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; 3727 uint32_t resp_info = fcprsp->rspStatus2; 3728 uint32_t scsi_status = fcprsp->rspStatus3; 3729 struct lpfc_fast_path_event *fast_path_evt = NULL; 3730 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode; 3731 unsigned long flags; 3732 3733 if (!pnode) 3734 return; 3735 3736 /* If there is queuefull or busy condition send a scsi event */ 3737 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) || 3738 (cmnd->result == SAM_STAT_BUSY)) { 3739 fast_path_evt = lpfc_alloc_fast_evt(phba); 3740 if (!fast_path_evt) 3741 return; 3742 fast_path_evt->un.scsi_evt.event_type = 3743 FC_REG_SCSI_EVENT; 3744 fast_path_evt->un.scsi_evt.subcategory = 3745 (cmnd->result == SAM_STAT_TASK_SET_FULL) ? 3746 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY; 3747 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun; 3748 memcpy(&fast_path_evt->un.scsi_evt.wwpn, 3749 &pnode->nlp_portname, sizeof(struct lpfc_name)); 3750 memcpy(&fast_path_evt->un.scsi_evt.wwnn, 3751 &pnode->nlp_nodename, sizeof(struct lpfc_name)); 3752 } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen && 3753 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) { 3754 fast_path_evt = lpfc_alloc_fast_evt(phba); 3755 if (!fast_path_evt) 3756 return; 3757 fast_path_evt->un.check_cond_evt.scsi_event.event_type = 3758 FC_REG_SCSI_EVENT; 3759 fast_path_evt->un.check_cond_evt.scsi_event.subcategory = 3760 LPFC_EVENT_CHECK_COND; 3761 fast_path_evt->un.check_cond_evt.scsi_event.lun = 3762 cmnd->device->lun; 3763 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn, 3764 &pnode->nlp_portname, sizeof(struct lpfc_name)); 3765 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn, 3766 &pnode->nlp_nodename, sizeof(struct lpfc_name)); 3767 fast_path_evt->un.check_cond_evt.sense_key = 3768 cmnd->sense_buffer[2] & 0xf; 3769 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12]; 3770 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13]; 3771 } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) && 3772 fcpi_parm && 3773 ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) || 3774 ((scsi_status == SAM_STAT_GOOD) && 3775 !(resp_info & (RESID_UNDER | RESID_OVER))))) { 3776 /* 3777 * If status is good or resid does not match with fcp_param and 3778 * there is valid fcpi_parm, then there is a read_check error 3779 */ 3780 fast_path_evt = lpfc_alloc_fast_evt(phba); 3781 if (!fast_path_evt) 3782 return; 3783 fast_path_evt->un.read_check_error.header.event_type = 3784 FC_REG_FABRIC_EVENT; 3785 
fast_path_evt->un.read_check_error.header.subcategory = 3786 LPFC_EVENT_FCPRDCHKERR; 3787 memcpy(&fast_path_evt->un.read_check_error.header.wwpn, 3788 &pnode->nlp_portname, sizeof(struct lpfc_name)); 3789 memcpy(&fast_path_evt->un.read_check_error.header.wwnn, 3790 &pnode->nlp_nodename, sizeof(struct lpfc_name)); 3791 fast_path_evt->un.read_check_error.lun = cmnd->device->lun; 3792 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0]; 3793 fast_path_evt->un.read_check_error.fcpiparam = 3794 fcpi_parm; 3795 } else 3796 return; 3797 3798 fast_path_evt->vport = vport; 3799 spin_lock_irqsave(&phba->hbalock, flags); 3800 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list); 3801 spin_unlock_irqrestore(&phba->hbalock, flags); 3802 lpfc_worker_wake_up(phba); 3803 return; 3804 } 3805 3806 /** 3807 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev 3808 * @phba: The HBA for which this call is being executed. 3809 * @psb: The scsi buffer which is going to be un-mapped. 3810 * 3811 * This routine does DMA un-mapping of scatter gather list of scsi command 3812 * field of @lpfc_cmd for device with SLI-3 interface spec. 3813 **/ 3814 static void 3815 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb) 3816 { 3817 /* 3818 * There are only two special cases to consider. (1) the scsi command 3819 * requested scatter-gather usage or (2) the scsi command allocated 3820 * a request buffer, but did not request use_sg. There is a third 3821 * case, but it does not require resource deallocation. 3822 */ 3823 if (psb->seg_cnt > 0) 3824 scsi_dma_unmap(psb->pCmd); 3825 if (psb->prot_seg_cnt > 0) 3826 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd), 3827 scsi_prot_sg_count(psb->pCmd), 3828 psb->pCmd->sc_data_direction); 3829 } 3830 3831 /** 3832 * lpfc_unblock_requests - allow further commands to be queued. 3833 * @phba: pointer to phba object 3834 * 3835 * For single vport, just call scsi_unblock_requests on physical port. 3836 * For multiple vports, send scsi_unblock_requests for all the vports. 3837 */ 3838 void 3839 lpfc_unblock_requests(struct lpfc_hba *phba) 3840 { 3841 struct lpfc_vport **vports; 3842 struct Scsi_Host *shost; 3843 int i; 3844 3845 if (phba->sli_rev == LPFC_SLI_REV4 && 3846 !phba->sli4_hba.max_cfg_param.vpi_used) { 3847 shost = lpfc_shost_from_vport(phba->pport); 3848 scsi_unblock_requests(shost); 3849 return; 3850 } 3851 3852 vports = lpfc_create_vport_work_array(phba); 3853 if (vports != NULL) 3854 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3855 shost = lpfc_shost_from_vport(vports[i]); 3856 scsi_unblock_requests(shost); 3857 } 3858 lpfc_destroy_vport_work_array(phba, vports); 3859 } 3860 3861 /** 3862 * lpfc_block_requests - prevent further commands from being queued. 3863 * @phba: pointer to phba object 3864 * 3865 * For single vport, just call scsi_block_requests on physical port. 3866 * For multiple vports, send scsi_block_requests for all the vports. 
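 * When congestion management has set phba->cmf_stop_io, this routine
 * returns immediately without blocking any request queues.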
3867 */ 3868 void 3869 lpfc_block_requests(struct lpfc_hba *phba) 3870 { 3871 struct lpfc_vport **vports; 3872 struct Scsi_Host *shost; 3873 int i; 3874 3875 if (atomic_read(&phba->cmf_stop_io)) 3876 return; 3877 3878 if (phba->sli_rev == LPFC_SLI_REV4 && 3879 !phba->sli4_hba.max_cfg_param.vpi_used) { 3880 shost = lpfc_shost_from_vport(phba->pport); 3881 scsi_block_requests(shost); 3882 return; 3883 } 3884 3885 vports = lpfc_create_vport_work_array(phba); 3886 if (vports != NULL) 3887 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3888 shost = lpfc_shost_from_vport(vports[i]); 3889 scsi_block_requests(shost); 3890 } 3891 lpfc_destroy_vport_work_array(phba, vports); 3892 } 3893 3894 /** 3895 * lpfc_update_cmf_cmpl - Adjust CMF counters for IO completion 3896 * @phba: The HBA for which this call is being executed. 3897 * @time: The latency of the IO that completed (in ns) 3898 * @size: The size of the IO that completed 3899 * @shost: SCSI host the IO completed on (NULL for a NVME IO) 3900 * 3901 * The routine adjusts the various Burst and Bandwidth counters used in 3902 * Congestion management and E2E. If time is set to LPFC_CGN_NOT_SENT, 3903 * that means the IO was never issued to the HBA, so this routine is 3904 * just being called to cleanup the counter from a previous 3905 * lpfc_update_cmf_cmd call. 3906 */ 3907 int 3908 lpfc_update_cmf_cmpl(struct lpfc_hba *phba, 3909 uint64_t time, uint32_t size, struct Scsi_Host *shost) 3910 { 3911 struct lpfc_cgn_stat *cgs; 3912 3913 if (time != LPFC_CGN_NOT_SENT) { 3914 /* lat is ns coming in, save latency in us */ 3915 if (time < 1000) 3916 time = 1; 3917 else 3918 time = div_u64(time + 500, 1000); /* round it */ 3919 3920 cgs = this_cpu_ptr(phba->cmf_stat); 3921 atomic64_add(size, &cgs->rcv_bytes); 3922 atomic64_add(time, &cgs->rx_latency); 3923 atomic_inc(&cgs->rx_io_cnt); 3924 } 3925 return 0; 3926 } 3927 3928 /** 3929 * lpfc_update_cmf_cmd - Adjust CMF counters for IO submission 3930 * @phba: The HBA for which this call is being executed. 3931 * @size: The size of the IO that will be issued 3932 * 3933 * The routine adjusts the various Burst and Bandwidth counters used in 3934 * Congestion management and E2E. 3935 */ 3936 int 3937 lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t size) 3938 { 3939 uint64_t total; 3940 struct lpfc_cgn_stat *cgs; 3941 int cpu; 3942 3943 /* At this point we are either LPFC_CFG_MANAGED or LPFC_CFG_MONITOR */ 3944 if (phba->cmf_active_mode == LPFC_CFG_MANAGED) { 3945 total = 0; 3946 for_each_present_cpu(cpu) { 3947 cgs = per_cpu_ptr(phba->cmf_stat, cpu); 3948 total += atomic64_read(&cgs->total_bytes); 3949 } 3950 if (total >= phba->cmf_max_bytes_per_interval) { 3951 if (!atomic_xchg(&phba->cmf_bw_wait, 1)) { 3952 lpfc_block_requests(phba); 3953 phba->cmf_last_ts = 3954 lpfc_calc_cmf_latency(phba); 3955 } 3956 atomic_inc(&phba->cmf_busy); 3957 return -EBUSY; 3958 } 3959 if (size > atomic_read(&phba->rx_max_read_cnt)) 3960 atomic_set(&phba->rx_max_read_cnt, size); 3961 } 3962 3963 cgs = this_cpu_ptr(phba->cmf_stat); 3964 atomic64_add(size, &cgs->total_bytes); 3965 return 0; 3966 } 3967 3968 /** 3969 * lpfc_handle_fcp_err - FCP response handler 3970 * @vport: The virtual port for which this call is being executed. 3971 * @lpfc_cmd: Pointer to lpfc_io_buf data structure. 3972 * @fcpi_parm: FCP Initiator parameter. 3973 * 3974 * This routine is called to process response IOCB with status field 3975 * IOSTAT_FCP_RSP_ERROR. 
This routine sets result field of scsi command 3976 * based upon SCSI and FCP error. 3977 **/ 3978 static void 3979 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, 3980 uint32_t fcpi_parm) 3981 { 3982 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; 3983 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd; 3984 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; 3985 uint32_t resp_info = fcprsp->rspStatus2; 3986 uint32_t scsi_status = fcprsp->rspStatus3; 3987 uint32_t *lp; 3988 uint32_t host_status = DID_OK; 3989 uint32_t rsplen = 0; 3990 uint32_t fcpDl; 3991 uint32_t logit = LOG_FCP | LOG_FCP_ERROR; 3992 3993 3994 /* 3995 * If this is a task management command, there is no 3996 * scsi packet associated with this lpfc_cmd. The driver 3997 * consumes it. 3998 */ 3999 if (fcpcmd->fcpCntl2) { 4000 scsi_status = 0; 4001 goto out; 4002 } 4003 4004 if (resp_info & RSP_LEN_VALID) { 4005 rsplen = be32_to_cpu(fcprsp->rspRspLen); 4006 if (rsplen != 0 && rsplen != 4 && rsplen != 8) { 4007 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4008 "2719 Invalid response length: " 4009 "tgt x%x lun x%llx cmnd x%x rsplen " 4010 "x%x\n", cmnd->device->id, 4011 cmnd->device->lun, cmnd->cmnd[0], 4012 rsplen); 4013 host_status = DID_ERROR; 4014 goto out; 4015 } 4016 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) { 4017 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4018 "2757 Protocol failure detected during " 4019 "processing of FCP I/O op: " 4020 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n", 4021 cmnd->device->id, 4022 cmnd->device->lun, cmnd->cmnd[0], 4023 fcprsp->rspInfo3); 4024 host_status = DID_ERROR; 4025 goto out; 4026 } 4027 } 4028 4029 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) { 4030 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen); 4031 if (snslen > SCSI_SENSE_BUFFERSIZE) 4032 snslen = SCSI_SENSE_BUFFERSIZE; 4033 4034 if (resp_info & RSP_LEN_VALID) 4035 rsplen = be32_to_cpu(fcprsp->rspRspLen); 4036 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen); 4037 } 4038 lp = (uint32_t *)cmnd->sense_buffer; 4039 4040 /* special handling for under run conditions */ 4041 if (!scsi_status && (resp_info & RESID_UNDER)) { 4042 /* don't log under runs if fcp set... */ 4043 if (vport->cfg_log_verbose & LOG_FCP) 4044 logit = LOG_FCP_ERROR; 4045 /* unless operator says so */ 4046 if (vport->cfg_log_verbose & LOG_FCP_UNDER) 4047 logit = LOG_FCP_UNDER; 4048 } 4049 4050 lpfc_printf_vlog(vport, KERN_WARNING, logit, 4051 "9024 FCP command x%x failed: x%x SNS x%x x%x " 4052 "Data: x%x x%x x%x x%x x%x\n", 4053 cmnd->cmnd[0], scsi_status, 4054 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info, 4055 be32_to_cpu(fcprsp->rspResId), 4056 be32_to_cpu(fcprsp->rspSnsLen), 4057 be32_to_cpu(fcprsp->rspRspLen), 4058 fcprsp->rspInfo3); 4059 4060 scsi_set_resid(cmnd, 0); 4061 fcpDl = be32_to_cpu(fcpcmd->fcpDl); 4062 if (resp_info & RESID_UNDER) { 4063 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId)); 4064 4065 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER, 4066 "9025 FCP Underrun, expected %d, " 4067 "residual %d Data: x%x x%x x%x\n", 4068 fcpDl, 4069 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0], 4070 cmnd->underflow); 4071 4072 /* 4073 * If there is an under run, check if under run reported by 4074 * storage array is same as the under run reported by HBA. 4075 * If this is not same, there is a dropped frame. 
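 * In that case the residual is forced to the full transfer length and
 * the command is completed with DID_ERROR.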
4076 */ 4077 if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) { 4078 lpfc_printf_vlog(vport, KERN_WARNING, 4079 LOG_FCP | LOG_FCP_ERROR, 4080 "9026 FCP Read Check Error " 4081 "and Underrun Data: x%x x%x x%x x%x\n", 4082 fcpDl, 4083 scsi_get_resid(cmnd), fcpi_parm, 4084 cmnd->cmnd[0]); 4085 scsi_set_resid(cmnd, scsi_bufflen(cmnd)); 4086 host_status = DID_ERROR; 4087 } 4088 /* 4089 * The cmnd->underflow is the minimum number of bytes that must 4090 * be transferred for this command. Provided a sense condition 4091 * is not present, make sure the actual amount transferred is at 4092 * least the underflow value or fail. 4093 */ 4094 if (!(resp_info & SNS_LEN_VALID) && 4095 (scsi_status == SAM_STAT_GOOD) && 4096 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) 4097 < cmnd->underflow)) { 4098 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 4099 "9027 FCP command x%x residual " 4100 "underrun converted to error " 4101 "Data: x%x x%x x%x\n", 4102 cmnd->cmnd[0], scsi_bufflen(cmnd), 4103 scsi_get_resid(cmnd), cmnd->underflow); 4104 host_status = DID_ERROR; 4105 } 4106 } else if (resp_info & RESID_OVER) { 4107 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 4108 "9028 FCP command x%x residual overrun error. " 4109 "Data: x%x x%x\n", cmnd->cmnd[0], 4110 scsi_bufflen(cmnd), scsi_get_resid(cmnd)); 4111 host_status = DID_ERROR; 4112 4113 /* 4114 * Check SLI validation that all the transfer was actually done 4115 * (fcpi_parm should be zero). Apply check only to reads. 4116 */ 4117 } else if (fcpi_parm) { 4118 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, 4119 "9029 FCP %s Check Error Data: " 4120 "x%x x%x x%x x%x x%x\n", 4121 ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ? 4122 "Read" : "Write"), 4123 fcpDl, be32_to_cpu(fcprsp->rspResId), 4124 fcpi_parm, cmnd->cmnd[0], scsi_status); 4125 4126 /* There is some issue with the LPe12000 that causes it 4127 * to miscalculate the fcpi_parm and falsely trip this 4128 * recovery logic. Detect this case and don't error when true. 4129 */ 4130 if (fcpi_parm > fcpDl) 4131 goto out; 4132 4133 switch (scsi_status) { 4134 case SAM_STAT_GOOD: 4135 case SAM_STAT_CHECK_CONDITION: 4136 /* Fabric dropped a data frame. Fail any successful 4137 * command in which we detected dropped frames. 4138 * A status of good or some check conditions could 4139 * be considered a successful command. 4140 */ 4141 host_status = DID_ERROR; 4142 break; 4143 } 4144 scsi_set_resid(cmnd, scsi_bufflen(cmnd)); 4145 } 4146 4147 out: 4148 cmnd->result = host_status << 16 | scsi_status; 4149 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, fcpi_parm); 4150 } 4151 4152 /** 4153 * lpfc_fcp_io_cmd_wqe_cmpl - Complete a FCP IO 4154 * @phba: The hba for which this call is being executed. 4155 * @pwqeIn: The command WQE for the scsi cmnd. 4156 * @wcqe: Pointer to driver response CQE object. 4157 * 4158 * This routine assigns scsi command result by looking into response WQE 4159 * status field appropriately. This routine handles QUEUE FULL condition as 4160 * well by ramping down device queue depth. 
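 *
 * Note: this is the completion handler for SLI-4 WQE based IO;
 * lpfc_scsi_cmd_iocb_cmpl below plays the same role for IOCB based IO.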
4161 **/ 4162 static void 4163 lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, 4164 struct lpfc_wcqe_complete *wcqe) 4165 { 4166 struct lpfc_io_buf *lpfc_cmd = 4167 (struct lpfc_io_buf *)pwqeIn->context1; 4168 struct lpfc_vport *vport = pwqeIn->vport; 4169 struct lpfc_rport_data *rdata; 4170 struct lpfc_nodelist *ndlp; 4171 struct scsi_cmnd *cmd; 4172 unsigned long flags; 4173 struct lpfc_fast_path_event *fast_path_evt; 4174 struct Scsi_Host *shost; 4175 u32 logit = LOG_FCP; 4176 u32 status, idx; 4177 unsigned long iflags = 0; 4178 u32 lat; 4179 u8 wait_xb_clr = 0; 4180 4181 /* Sanity check on return of outstanding command */ 4182 if (!lpfc_cmd) { 4183 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4184 "9032 Null lpfc_cmd pointer. No " 4185 "release, skip completion\n"); 4186 return; 4187 } 4188 4189 rdata = lpfc_cmd->rdata; 4190 ndlp = rdata->pnode; 4191 4192 if (bf_get(lpfc_wcqe_c_xb, wcqe)) { 4193 /* TOREMOVE - currently this flag is checked during 4194 * the release of lpfc_iocbq. Remove once we move 4195 * to lpfc_wqe_job construct. 4196 * 4197 * This needs to be done outside buf_lock 4198 */ 4199 spin_lock_irqsave(&phba->hbalock, iflags); 4200 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_EXCHANGE_BUSY; 4201 spin_unlock_irqrestore(&phba->hbalock, iflags); 4202 } 4203 4204 /* Guard against abort handler being called at same time */ 4205 spin_lock(&lpfc_cmd->buf_lock); 4206 4207 /* Sanity check on return of outstanding command */ 4208 cmd = lpfc_cmd->pCmd; 4209 if (!cmd) { 4210 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4211 "9042 I/O completion: Not an active IO\n"); 4212 spin_unlock(&lpfc_cmd->buf_lock); 4213 lpfc_release_scsi_buf(phba, lpfc_cmd); 4214 return; 4215 } 4216 idx = lpfc_cmd->cur_iocbq.hba_wqidx; 4217 if (phba->sli4_hba.hdwq) 4218 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++; 4219 4220 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4221 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) 4222 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); 4223 #endif 4224 shost = cmd->device->host; 4225 4226 status = bf_get(lpfc_wcqe_c_status, wcqe); 4227 lpfc_cmd->status = (status & LPFC_IOCB_STATUS_MASK); 4228 lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK); 4229 4230 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; 4231 if (bf_get(lpfc_wcqe_c_xb, wcqe)) { 4232 lpfc_cmd->flags |= LPFC_SBUF_XBUSY; 4233 if (phba->cfg_fcp_wait_abts_rsp) 4234 wait_xb_clr = 1; 4235 } 4236 4237 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4238 if (lpfc_cmd->prot_data_type) { 4239 struct scsi_dif_tuple *src = NULL; 4240 4241 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment; 4242 /* 4243 * Used to restore any changes to protection 4244 * data for error injection. 
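 * The original reftag, apptag or guard value that was overwritten to
 * inject the error is copied back from lpfc_cmd->prot_data.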
4245 */ 4246 switch (lpfc_cmd->prot_data_type) { 4247 case LPFC_INJERR_REFTAG: 4248 src->ref_tag = 4249 lpfc_cmd->prot_data; 4250 break; 4251 case LPFC_INJERR_APPTAG: 4252 src->app_tag = 4253 (uint16_t)lpfc_cmd->prot_data; 4254 break; 4255 case LPFC_INJERR_GUARD: 4256 src->guard_tag = 4257 (uint16_t)lpfc_cmd->prot_data; 4258 break; 4259 default: 4260 break; 4261 } 4262 4263 lpfc_cmd->prot_data = 0; 4264 lpfc_cmd->prot_data_type = 0; 4265 lpfc_cmd->prot_data_segment = NULL; 4266 } 4267 #endif 4268 if (unlikely(lpfc_cmd->status)) { 4269 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && 4270 (lpfc_cmd->result & IOERR_DRVR_MASK)) 4271 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 4272 else if (lpfc_cmd->status >= IOSTAT_CNT) 4273 lpfc_cmd->status = IOSTAT_DEFAULT; 4274 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR && 4275 !lpfc_cmd->fcp_rsp->rspStatus3 && 4276 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) && 4277 !(vport->cfg_log_verbose & LOG_FCP_UNDER)) 4278 logit = 0; 4279 else 4280 logit = LOG_FCP | LOG_FCP_UNDER; 4281 lpfc_printf_vlog(vport, KERN_WARNING, logit, 4282 "9034 FCP cmd x%x failed <%d/%lld> " 4283 "status: x%x result: x%x " 4284 "sid: x%x did: x%x oxid: x%x " 4285 "Data: x%x x%x x%x\n", 4286 cmd->cmnd[0], 4287 cmd->device ? cmd->device->id : 0xffff, 4288 cmd->device ? cmd->device->lun : 0xffff, 4289 lpfc_cmd->status, lpfc_cmd->result, 4290 vport->fc_myDID, 4291 (ndlp) ? ndlp->nlp_DID : 0, 4292 lpfc_cmd->cur_iocbq.sli4_xritag, 4293 wcqe->parameter, wcqe->total_data_placed, 4294 lpfc_cmd->cur_iocbq.iotag); 4295 } 4296 4297 switch (lpfc_cmd->status) { 4298 case IOSTAT_SUCCESS: 4299 cmd->result = DID_OK << 16; 4300 break; 4301 case IOSTAT_FCP_RSP_ERROR: 4302 lpfc_handle_fcp_err(vport, lpfc_cmd, 4303 pwqeIn->wqe.fcp_iread.total_xfer_len - 4304 wcqe->total_data_placed); 4305 break; 4306 case IOSTAT_NPORT_BSY: 4307 case IOSTAT_FABRIC_BSY: 4308 cmd->result = DID_TRANSPORT_DISRUPTED << 16; 4309 fast_path_evt = lpfc_alloc_fast_evt(phba); 4310 if (!fast_path_evt) 4311 break; 4312 fast_path_evt->un.fabric_evt.event_type = 4313 FC_REG_FABRIC_EVENT; 4314 fast_path_evt->un.fabric_evt.subcategory = 4315 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ? 4316 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY; 4317 if (ndlp) { 4318 memcpy(&fast_path_evt->un.fabric_evt.wwpn, 4319 &ndlp->nlp_portname, 4320 sizeof(struct lpfc_name)); 4321 memcpy(&fast_path_evt->un.fabric_evt.wwnn, 4322 &ndlp->nlp_nodename, 4323 sizeof(struct lpfc_name)); 4324 } 4325 fast_path_evt->vport = vport; 4326 fast_path_evt->work_evt.evt = 4327 LPFC_EVT_FASTPATH_MGMT_EVT; 4328 spin_lock_irqsave(&phba->hbalock, flags); 4329 list_add_tail(&fast_path_evt->work_evt.evt_listp, 4330 &phba->work_list); 4331 spin_unlock_irqrestore(&phba->hbalock, flags); 4332 lpfc_worker_wake_up(phba); 4333 lpfc_printf_vlog(vport, KERN_WARNING, logit, 4334 "9035 Fabric/Node busy FCP cmd x%x failed" 4335 " <%d/%lld> " 4336 "status: x%x result: x%x " 4337 "sid: x%x did: x%x oxid: x%x " 4338 "Data: x%x x%x x%x\n", 4339 cmd->cmnd[0], 4340 cmd->device ? cmd->device->id : 0xffff, 4341 cmd->device ? cmd->device->lun : 0xffff, 4342 lpfc_cmd->status, lpfc_cmd->result, 4343 vport->fc_myDID, 4344 (ndlp) ? ndlp->nlp_DID : 0, 4345 lpfc_cmd->cur_iocbq.sli4_xritag, 4346 wcqe->parameter, 4347 wcqe->total_data_placed, 4348 lpfc_cmd->cur_iocbq.iocb.ulpIoTag); 4349 break; 4350 case IOSTAT_REMOTE_STOP: 4351 if (ndlp) { 4352 /* This I/O was aborted by the target, we don't 4353 * know the rxid and because we did not send the 4354 * ABTS we cannot generate and RRQ. 
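 * Only the local xritag is passed to lpfc_set_rrq_active(); the
 * remaining arguments are zero.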
4355 */ 4356 lpfc_set_rrq_active(phba, ndlp, 4357 lpfc_cmd->cur_iocbq.sli4_lxritag, 4358 0, 0); 4359 } 4360 fallthrough; 4361 case IOSTAT_LOCAL_REJECT: 4362 if (lpfc_cmd->result & IOERR_DRVR_MASK) 4363 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 4364 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR || 4365 lpfc_cmd->result == 4366 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR || 4367 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR || 4368 lpfc_cmd->result == 4369 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) { 4370 cmd->result = DID_NO_CONNECT << 16; 4371 break; 4372 } 4373 if (lpfc_cmd->result == IOERR_INVALID_RPI || 4374 lpfc_cmd->result == IOERR_NO_RESOURCES || 4375 lpfc_cmd->result == IOERR_ABORT_REQUESTED || 4376 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) { 4377 cmd->result = DID_REQUEUE << 16; 4378 break; 4379 } 4380 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED || 4381 lpfc_cmd->result == IOERR_TX_DMA_FAILED) && 4382 status == CQE_STATUS_DI_ERROR) { 4383 if (scsi_get_prot_op(cmd) != 4384 SCSI_PROT_NORMAL) { 4385 /* 4386 * This is a response for a BG enabled 4387 * cmd. Parse BG error 4388 */ 4389 lpfc_sli4_parse_bg_err(phba, lpfc_cmd, 4390 wcqe); 4391 break; 4392 } 4393 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 4394 "9040 non-zero BGSTAT on unprotected cmd\n"); 4395 } 4396 lpfc_printf_vlog(vport, KERN_WARNING, logit, 4397 "9036 Local Reject FCP cmd x%x failed" 4398 " <%d/%lld> " 4399 "status: x%x result: x%x " 4400 "sid: x%x did: x%x oxid: x%x " 4401 "Data: x%x x%x x%x\n", 4402 cmd->cmnd[0], 4403 cmd->device ? cmd->device->id : 0xffff, 4404 cmd->device ? cmd->device->lun : 0xffff, 4405 lpfc_cmd->status, lpfc_cmd->result, 4406 vport->fc_myDID, 4407 (ndlp) ? ndlp->nlp_DID : 0, 4408 lpfc_cmd->cur_iocbq.sli4_xritag, 4409 wcqe->parameter, 4410 wcqe->total_data_placed, 4411 lpfc_cmd->cur_iocbq.iocb.ulpIoTag); 4412 fallthrough; 4413 default: 4414 if (lpfc_cmd->status >= IOSTAT_CNT) 4415 lpfc_cmd->status = IOSTAT_DEFAULT; 4416 cmd->result = DID_ERROR << 16; 4417 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 4418 "9037 FCP Completion Error: xri %x " 4419 "status x%x result x%x [x%x] " 4420 "placed x%x\n", 4421 lpfc_cmd->cur_iocbq.sli4_xritag, 4422 lpfc_cmd->status, lpfc_cmd->result, 4423 wcqe->parameter, 4424 wcqe->total_data_placed); 4425 } 4426 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) { 4427 u32 *lp = (u32 *)cmd->sense_buffer; 4428 4429 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 4430 "9039 Iodone <%d/%llu> cmd x%px, error " 4431 "x%x SNS x%x x%x Data: x%x x%x\n", 4432 cmd->device->id, cmd->device->lun, cmd, 4433 cmd->result, *lp, *(lp + 3), cmd->retries, 4434 scsi_get_resid(cmd)); 4435 } 4436 4437 lpfc_update_stats(vport, lpfc_cmd); 4438 4439 if (vport->cfg_max_scsicmpl_time && 4440 time_after(jiffies, lpfc_cmd->start_time + 4441 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { 4442 spin_lock_irqsave(shost->host_lock, flags); 4443 if (ndlp) { 4444 if (ndlp->cmd_qdepth > 4445 atomic_read(&ndlp->cmd_pending) && 4446 (atomic_read(&ndlp->cmd_pending) > 4447 LPFC_MIN_TGT_QDEPTH) && 4448 (cmd->cmnd[0] == READ_10 || 4449 cmd->cmnd[0] == WRITE_10)) 4450 ndlp->cmd_qdepth = 4451 atomic_read(&ndlp->cmd_pending); 4452 4453 ndlp->last_change_time = jiffies; 4454 } 4455 spin_unlock_irqrestore(shost->host_lock, flags); 4456 } 4457 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 4458 4459 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4460 if (lpfc_cmd->ts_cmd_start) { 4461 lpfc_cmd->ts_isr_cmpl = lpfc_cmd->cur_iocbq.isr_timestamp; 4462 lpfc_cmd->ts_data_io = ktime_get_ns(); 4463 phba->ktime_last_cmd = 
lpfc_cmd->ts_data_io; 4464 lpfc_io_ktime(phba, lpfc_cmd); 4465 } 4466 #endif 4467 if (likely(!wait_xb_clr)) 4468 lpfc_cmd->pCmd = NULL; 4469 spin_unlock(&lpfc_cmd->buf_lock); 4470 4471 /* Check if IO qualified for CMF */ 4472 if (phba->cmf_active_mode != LPFC_CFG_OFF && 4473 cmd->sc_data_direction == DMA_FROM_DEVICE && 4474 (scsi_sg_count(cmd))) { 4475 /* Used when calculating average latency */ 4476 lat = ktime_get_ns() - lpfc_cmd->rx_cmd_start; 4477 lpfc_update_cmf_cmpl(phba, lat, scsi_bufflen(cmd), shost); 4478 } 4479 4480 if (wait_xb_clr) 4481 goto out; 4482 4483 /* The sdev is not guaranteed to be valid post scsi_done upcall. */ 4484 cmd->scsi_done(cmd); 4485 4486 /* 4487 * If there is an abort thread waiting for command completion 4488 * wake up the thread. 4489 */ 4490 spin_lock(&lpfc_cmd->buf_lock); 4491 lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED; 4492 if (lpfc_cmd->waitq) 4493 wake_up(lpfc_cmd->waitq); 4494 spin_unlock(&lpfc_cmd->buf_lock); 4495 out: 4496 lpfc_release_scsi_buf(phba, lpfc_cmd); 4497 } 4498 4499 /** 4500 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine 4501 * @phba: The Hba for which this call is being executed. 4502 * @pIocbIn: The command IOCBQ for the scsi cmnd. 4503 * @pIocbOut: The response IOCBQ for the scsi cmnd. 4504 * 4505 * This routine assigns scsi command result by looking into response IOCB 4506 * status field appropriately. This routine handles QUEUE FULL condition as 4507 * well by ramping down device queue depth. 4508 **/ 4509 static void 4510 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, 4511 struct lpfc_iocbq *pIocbOut) 4512 { 4513 struct lpfc_io_buf *lpfc_cmd = 4514 (struct lpfc_io_buf *) pIocbIn->context1; 4515 struct lpfc_vport *vport = pIocbIn->vport; 4516 struct lpfc_rport_data *rdata = lpfc_cmd->rdata; 4517 struct lpfc_nodelist *pnode = rdata->pnode; 4518 struct scsi_cmnd *cmd; 4519 unsigned long flags; 4520 struct lpfc_fast_path_event *fast_path_evt; 4521 struct Scsi_Host *shost; 4522 int idx; 4523 uint32_t logit = LOG_FCP; 4524 4525 /* Guard against abort handler being called at same time */ 4526 spin_lock(&lpfc_cmd->buf_lock); 4527 4528 /* Sanity check on return of outstanding command */ 4529 cmd = lpfc_cmd->pCmd; 4530 if (!cmd || !phba) { 4531 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4532 "2621 IO completion: Not an active IO\n"); 4533 spin_unlock(&lpfc_cmd->buf_lock); 4534 return; 4535 } 4536 4537 idx = lpfc_cmd->cur_iocbq.hba_wqidx; 4538 if (phba->sli4_hba.hdwq) 4539 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++; 4540 4541 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4542 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) 4543 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); 4544 #endif 4545 shost = cmd->device->host; 4546 4547 lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK); 4548 lpfc_cmd->status = pIocbOut->iocb.ulpStatus; 4549 /* pick up SLI4 exchange busy status from HBA */ 4550 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; 4551 if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY) 4552 lpfc_cmd->flags |= LPFC_SBUF_XBUSY; 4553 4554 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4555 if (lpfc_cmd->prot_data_type) { 4556 struct scsi_dif_tuple *src = NULL; 4557 4558 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment; 4559 /* 4560 * Used to restore any changes to protection 4561 * data for error injection. 
4562 */ 4563 switch (lpfc_cmd->prot_data_type) { 4564 case LPFC_INJERR_REFTAG: 4565 src->ref_tag = 4566 lpfc_cmd->prot_data; 4567 break; 4568 case LPFC_INJERR_APPTAG: 4569 src->app_tag = 4570 (uint16_t)lpfc_cmd->prot_data; 4571 break; 4572 case LPFC_INJERR_GUARD: 4573 src->guard_tag = 4574 (uint16_t)lpfc_cmd->prot_data; 4575 break; 4576 default: 4577 break; 4578 } 4579 4580 lpfc_cmd->prot_data = 0; 4581 lpfc_cmd->prot_data_type = 0; 4582 lpfc_cmd->prot_data_segment = NULL; 4583 } 4584 #endif 4585 4586 if (unlikely(lpfc_cmd->status)) { 4587 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && 4588 (lpfc_cmd->result & IOERR_DRVR_MASK)) 4589 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 4590 else if (lpfc_cmd->status >= IOSTAT_CNT) 4591 lpfc_cmd->status = IOSTAT_DEFAULT; 4592 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR && 4593 !lpfc_cmd->fcp_rsp->rspStatus3 && 4594 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) && 4595 !(vport->cfg_log_verbose & LOG_FCP_UNDER)) 4596 logit = 0; 4597 else 4598 logit = LOG_FCP | LOG_FCP_UNDER; 4599 lpfc_printf_vlog(vport, KERN_WARNING, logit, 4600 "9030 FCP cmd x%x failed <%d/%lld> " 4601 "status: x%x result: x%x " 4602 "sid: x%x did: x%x oxid: x%x " 4603 "Data: x%x x%x\n", 4604 cmd->cmnd[0], 4605 cmd->device ? cmd->device->id : 0xffff, 4606 cmd->device ? cmd->device->lun : 0xffff, 4607 lpfc_cmd->status, lpfc_cmd->result, 4608 vport->fc_myDID, 4609 (pnode) ? pnode->nlp_DID : 0, 4610 phba->sli_rev == LPFC_SLI_REV4 ? 4611 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff, 4612 pIocbOut->iocb.ulpContext, 4613 lpfc_cmd->cur_iocbq.iocb.ulpIoTag); 4614 4615 switch (lpfc_cmd->status) { 4616 case IOSTAT_FCP_RSP_ERROR: 4617 /* Call FCP RSP handler to determine result */ 4618 lpfc_handle_fcp_err(vport, lpfc_cmd, 4619 pIocbOut->iocb.un.fcpi.fcpi_parm); 4620 break; 4621 case IOSTAT_NPORT_BSY: 4622 case IOSTAT_FABRIC_BSY: 4623 cmd->result = DID_TRANSPORT_DISRUPTED << 16; 4624 fast_path_evt = lpfc_alloc_fast_evt(phba); 4625 if (!fast_path_evt) 4626 break; 4627 fast_path_evt->un.fabric_evt.event_type = 4628 FC_REG_FABRIC_EVENT; 4629 fast_path_evt->un.fabric_evt.subcategory = 4630 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ? 
4631 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY; 4632 if (pnode) { 4633 memcpy(&fast_path_evt->un.fabric_evt.wwpn, 4634 &pnode->nlp_portname, 4635 sizeof(struct lpfc_name)); 4636 memcpy(&fast_path_evt->un.fabric_evt.wwnn, 4637 &pnode->nlp_nodename, 4638 sizeof(struct lpfc_name)); 4639 } 4640 fast_path_evt->vport = vport; 4641 fast_path_evt->work_evt.evt = 4642 LPFC_EVT_FASTPATH_MGMT_EVT; 4643 spin_lock_irqsave(&phba->hbalock, flags); 4644 list_add_tail(&fast_path_evt->work_evt.evt_listp, 4645 &phba->work_list); 4646 spin_unlock_irqrestore(&phba->hbalock, flags); 4647 lpfc_worker_wake_up(phba); 4648 break; 4649 case IOSTAT_LOCAL_REJECT: 4650 case IOSTAT_REMOTE_STOP: 4651 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR || 4652 lpfc_cmd->result == 4653 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR || 4654 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR || 4655 lpfc_cmd->result == 4656 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) { 4657 cmd->result = DID_NO_CONNECT << 16; 4658 break; 4659 } 4660 if (lpfc_cmd->result == IOERR_INVALID_RPI || 4661 lpfc_cmd->result == IOERR_NO_RESOURCES || 4662 lpfc_cmd->result == IOERR_ABORT_REQUESTED || 4663 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) { 4664 cmd->result = DID_REQUEUE << 16; 4665 break; 4666 } 4667 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED || 4668 lpfc_cmd->result == IOERR_TX_DMA_FAILED) && 4669 pIocbOut->iocb.unsli3.sli3_bg.bgstat) { 4670 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { 4671 /* 4672 * This is a response for a BG enabled 4673 * cmd. Parse BG error 4674 */ 4675 lpfc_parse_bg_err(phba, lpfc_cmd, 4676 pIocbOut); 4677 break; 4678 } else { 4679 lpfc_printf_vlog(vport, KERN_WARNING, 4680 LOG_BG, 4681 "9031 non-zero BGSTAT " 4682 "on unprotected cmd\n"); 4683 } 4684 } 4685 if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP) 4686 && (phba->sli_rev == LPFC_SLI_REV4) 4687 && pnode) { 4688 /* This IO was aborted by the target, we don't 4689 * know the rxid and because we did not send the 4690 * ABTS we cannot generate and RRQ. 
4691 */ 4692 lpfc_set_rrq_active(phba, pnode, 4693 lpfc_cmd->cur_iocbq.sli4_lxritag, 4694 0, 0); 4695 } 4696 fallthrough; 4697 default: 4698 cmd->result = DID_ERROR << 16; 4699 break; 4700 } 4701 4702 if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) 4703 cmd->result = DID_TRANSPORT_DISRUPTED << 16 | 4704 SAM_STAT_BUSY; 4705 } else 4706 cmd->result = DID_OK << 16; 4707 4708 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) { 4709 uint32_t *lp = (uint32_t *)cmd->sense_buffer; 4710 4711 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 4712 "0710 Iodone <%d/%llu> cmd x%px, error " 4713 "x%x SNS x%x x%x Data: x%x x%x\n", 4714 cmd->device->id, cmd->device->lun, cmd, 4715 cmd->result, *lp, *(lp + 3), cmd->retries, 4716 scsi_get_resid(cmd)); 4717 } 4718 4719 lpfc_update_stats(vport, lpfc_cmd); 4720 if (vport->cfg_max_scsicmpl_time && 4721 time_after(jiffies, lpfc_cmd->start_time + 4722 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { 4723 spin_lock_irqsave(shost->host_lock, flags); 4724 if (pnode) { 4725 if (pnode->cmd_qdepth > 4726 atomic_read(&pnode->cmd_pending) && 4727 (atomic_read(&pnode->cmd_pending) > 4728 LPFC_MIN_TGT_QDEPTH) && 4729 ((cmd->cmnd[0] == READ_10) || 4730 (cmd->cmnd[0] == WRITE_10))) 4731 pnode->cmd_qdepth = 4732 atomic_read(&pnode->cmd_pending); 4733 4734 pnode->last_change_time = jiffies; 4735 } 4736 spin_unlock_irqrestore(shost->host_lock, flags); 4737 } 4738 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 4739 4740 lpfc_cmd->pCmd = NULL; 4741 spin_unlock(&lpfc_cmd->buf_lock); 4742 4743 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4744 if (lpfc_cmd->ts_cmd_start) { 4745 lpfc_cmd->ts_isr_cmpl = pIocbIn->isr_timestamp; 4746 lpfc_cmd->ts_data_io = ktime_get_ns(); 4747 phba->ktime_last_cmd = lpfc_cmd->ts_data_io; 4748 lpfc_io_ktime(phba, lpfc_cmd); 4749 } 4750 #endif 4751 4752 /* The sdev is not guaranteed to be valid post scsi_done upcall. */ 4753 cmd->scsi_done(cmd); 4754 4755 /* 4756 * If there is an abort thread waiting for command completion 4757 * wake up the thread. 4758 */ 4759 spin_lock(&lpfc_cmd->buf_lock); 4760 lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED; 4761 if (lpfc_cmd->waitq) 4762 wake_up(lpfc_cmd->waitq); 4763 spin_unlock(&lpfc_cmd->buf_lock); 4764 4765 lpfc_release_scsi_buf(phba, lpfc_cmd); 4766 } 4767 4768 /** 4769 * lpfc_scsi_prep_cmnd_buf_s3 - SLI-3 IOCB init for the IO 4770 * @vport: Pointer to vport object. 4771 * @lpfc_cmd: The scsi buffer which is going to be prep'ed. 4772 * @tmo: timeout value for the IO 4773 * 4774 * Based on the data-direction of the command, initialize IOCB 4775 * in the I/O buffer. Fill in the IOCB fields which are independent 4776 * of the scsi buffer 4777 * 4778 * RETURNS 0 - SUCCESS, 4779 **/ 4780 static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport, 4781 struct lpfc_io_buf *lpfc_cmd, 4782 uint8_t tmo) 4783 { 4784 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 4785 struct lpfc_iocbq *piocbq = &lpfc_cmd->cur_iocbq; 4786 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 4787 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 4788 struct lpfc_nodelist *pnode = lpfc_cmd->ndlp; 4789 int datadir = scsi_cmnd->sc_data_direction; 4790 u32 fcpdl; 4791 4792 piocbq->iocb.un.fcpi.fcpi_XRdy = 0; 4793 4794 /* 4795 * There are three possibilities here - use scatter-gather segment, use 4796 * the single mapping, or neither. Start the lpfc command prep by 4797 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first 4798 * data bde entry. 
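 * For a write with first burst negotiated (NLP_FIRSTBURST), fcpi_XRdy is
 * preset to min(transfer length, cfg_first_burst_size).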
4799 */ 4800 if (scsi_sg_count(scsi_cmnd)) { 4801 if (datadir == DMA_TO_DEVICE) { 4802 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; 4803 iocb_cmd->ulpPU = PARM_READ_CHECK; 4804 if (vport->cfg_first_burst_size && 4805 (pnode->nlp_flag & NLP_FIRSTBURST)) { 4806 u32 xrdy_len; 4807 4808 fcpdl = scsi_bufflen(scsi_cmnd); 4809 xrdy_len = min(fcpdl, 4810 vport->cfg_first_burst_size); 4811 piocbq->iocb.un.fcpi.fcpi_XRdy = xrdy_len; 4812 } 4813 fcp_cmnd->fcpCntl3 = WRITE_DATA; 4814 } else { 4815 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; 4816 iocb_cmd->ulpPU = PARM_READ_CHECK; 4817 fcp_cmnd->fcpCntl3 = READ_DATA; 4818 } 4819 } else { 4820 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR; 4821 iocb_cmd->un.fcpi.fcpi_parm = 0; 4822 iocb_cmd->ulpPU = 0; 4823 fcp_cmnd->fcpCntl3 = 0; 4824 } 4825 4826 /* 4827 * Finish initializing those IOCB fields that are independent 4828 * of the scsi_cmnd request_buffer 4829 */ 4830 piocbq->iocb.ulpContext = pnode->nlp_rpi; 4831 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) 4832 piocbq->iocb.ulpFCP2Rcvy = 1; 4833 else 4834 piocbq->iocb.ulpFCP2Rcvy = 0; 4835 4836 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f); 4837 piocbq->context1 = lpfc_cmd; 4838 if (!piocbq->iocb_cmpl) 4839 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl; 4840 piocbq->iocb.ulpTimeout = tmo; 4841 piocbq->vport = vport; 4842 return 0; 4843 } 4844 4845 /** 4846 * lpfc_scsi_prep_cmnd_buf_s4 - SLI-4 WQE init for the IO 4847 * @vport: Pointer to vport object. 4848 * @lpfc_cmd: The scsi buffer which is going to be prep'ed. 4849 * @tmo: timeout value for the IO 4850 * 4851 * Based on the data-direction of the command copy WQE template 4852 * to I/O buffer WQE. Fill in the WQE fields which are independent 4853 * of the scsi buffer 4854 * 4855 * RETURNS 0 - SUCCESS, 4856 **/ 4857 static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport, 4858 struct lpfc_io_buf *lpfc_cmd, 4859 uint8_t tmo) 4860 { 4861 struct lpfc_hba *phba = vport->phba; 4862 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 4863 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 4864 struct lpfc_sli4_hdw_queue *hdwq = NULL; 4865 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; 4866 struct lpfc_nodelist *pnode = lpfc_cmd->ndlp; 4867 union lpfc_wqe128 *wqe = &pwqeq->wqe; 4868 u16 idx = lpfc_cmd->hdwq_no; 4869 int datadir = scsi_cmnd->sc_data_direction; 4870 4871 hdwq = &phba->sli4_hba.hdwq[idx]; 4872 4873 /* Initialize 64 bytes only */ 4874 memset(wqe, 0, sizeof(union lpfc_wqe128)); 4875 4876 /* 4877 * There are three possibilities here - use scatter-gather segment, use 4878 * the single mapping, or neither. 
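 * The WQE is seeded from the iwrite, iread or icmnd template (words 7 - 11,
 * or words 4 - 11 for an icmnd) before the command specific fields are
 * filled in.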
4879 */ 4880 if (scsi_sg_count(scsi_cmnd)) { 4881 if (datadir == DMA_TO_DEVICE) { 4882 /* From the iwrite template, initialize words 7 - 11 */ 4883 memcpy(&wqe->words[7], 4884 &lpfc_iwrite_cmd_template.words[7], 4885 sizeof(uint32_t) * 5); 4886 4887 fcp_cmnd->fcpCntl3 = WRITE_DATA; 4888 if (hdwq) 4889 hdwq->scsi_cstat.output_requests++; 4890 } else { 4891 /* From the iread template, initialize words 7 - 11 */ 4892 memcpy(&wqe->words[7], 4893 &lpfc_iread_cmd_template.words[7], 4894 sizeof(uint32_t) * 5); 4895 4896 /* Word 7 */ 4897 bf_set(wqe_tmo, &wqe->fcp_iread.wqe_com, tmo); 4898 4899 fcp_cmnd->fcpCntl3 = READ_DATA; 4900 if (hdwq) 4901 hdwq->scsi_cstat.input_requests++; 4902 4903 /* For a CMF Managed port, iod must be zero'ed */ 4904 if (phba->cmf_active_mode == LPFC_CFG_MANAGED) 4905 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, 4906 LPFC_WQE_IOD_NONE); 4907 } 4908 } else { 4909 /* From the icmnd template, initialize words 4 - 11 */ 4910 memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4], 4911 sizeof(uint32_t) * 8); 4912 4913 /* Word 7 */ 4914 bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, tmo); 4915 4916 fcp_cmnd->fcpCntl3 = 0; 4917 if (hdwq) 4918 hdwq->scsi_cstat.control_requests++; 4919 } 4920 4921 /* 4922 * Finish initializing those WQE fields that are independent 4923 * of the request_buffer 4924 */ 4925 4926 /* Word 3 */ 4927 bf_set(payload_offset_len, &wqe->fcp_icmd, 4928 sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); 4929 4930 /* Word 6 */ 4931 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 4932 phba->sli4_hba.rpi_ids[pnode->nlp_rpi]); 4933 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag); 4934 4935 /* Word 7*/ 4936 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) 4937 bf_set(wqe_erp, &wqe->generic.wqe_com, 1); 4938 4939 bf_set(wqe_class, &wqe->generic.wqe_com, 4940 (pnode->nlp_fcp_info & 0x0f)); 4941 4942 /* Word 8 */ 4943 wqe->generic.wqe_com.abort_tag = pwqeq->iotag; 4944 4945 /* Word 9 */ 4946 bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag); 4947 4948 pwqeq->vport = vport; 4949 pwqeq->vport = vport; 4950 pwqeq->context1 = lpfc_cmd; 4951 pwqeq->hba_wqidx = lpfc_cmd->hdwq_no; 4952 pwqeq->wqe_cmpl = lpfc_fcp_io_cmd_wqe_cmpl; 4953 4954 return 0; 4955 } 4956 4957 /** 4958 * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit 4959 * @vport: The virtual port for which this call is being executed. 4960 * @lpfc_cmd: The scsi command which needs to send. 4961 * @pnode: Pointer to lpfc_nodelist. 4962 * 4963 * This routine initializes fcp_cmnd and iocb data structure from scsi command 4964 * to transfer for device with SLI3 interface spec. 
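 * The CDB is copied into the FCP_CMND IU and zero padded out to
 * LPFC_FCP_CDB_LEN, the task attribute is set to SIMPLE_Q, and
 * lpfc_scsi_prep_cmnd_buf() then builds the IOCB/WQE.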
4965 **/ 4966 static int 4967 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, 4968 struct lpfc_nodelist *pnode) 4969 { 4970 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 4971 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 4972 u8 *ptr; 4973 4974 if (!pnode) 4975 return 0; 4976 4977 lpfc_cmd->fcp_rsp->rspSnsLen = 0; 4978 /* clear task management bits */ 4979 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0; 4980 4981 int_to_scsilun(lpfc_cmd->pCmd->device->lun, 4982 &lpfc_cmd->fcp_cmnd->fcp_lun); 4983 4984 ptr = &fcp_cmnd->fcpCdb[0]; 4985 memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len); 4986 if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) { 4987 ptr += scsi_cmnd->cmd_len; 4988 memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len)); 4989 } 4990 4991 fcp_cmnd->fcpCntl1 = SIMPLE_Q; 4992 4993 lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, lpfc_cmd->timeout); 4994 4995 return 0; 4996 } 4997 4998 /** 4999 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit 5000 * @vport: The virtual port for which this call is being executed. 5001 * @lpfc_cmd: Pointer to lpfc_io_buf data structure. 5002 * @lun: Logical unit number. 5003 * @task_mgmt_cmd: SCSI task management command. 5004 * 5005 * This routine creates FCP information unit corresponding to @task_mgmt_cmd 5006 * for device with SLI-3 interface spec. 5007 * 5008 * Return codes: 5009 * 0 - Error 5010 * 1 - Success 5011 **/ 5012 static int 5013 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, 5014 struct lpfc_io_buf *lpfc_cmd, 5015 uint64_t lun, 5016 uint8_t task_mgmt_cmd) 5017 { 5018 struct lpfc_iocbq *piocbq; 5019 IOCB_t *piocb; 5020 struct fcp_cmnd *fcp_cmnd; 5021 struct lpfc_rport_data *rdata = lpfc_cmd->rdata; 5022 struct lpfc_nodelist *ndlp = rdata->pnode; 5023 5024 if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE) 5025 return 0; 5026 5027 piocbq = &(lpfc_cmd->cur_iocbq); 5028 piocbq->vport = vport; 5029 5030 piocb = &piocbq->iocb; 5031 5032 fcp_cmnd = lpfc_cmd->fcp_cmnd; 5033 /* Clear out any old data in the FCP command area */ 5034 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); 5035 int_to_scsilun(lun, &fcp_cmnd->fcp_lun); 5036 fcp_cmnd->fcpCntl2 = task_mgmt_cmd; 5037 if (vport->phba->sli_rev == 3 && 5038 !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED)) 5039 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd); 5040 piocb->ulpCommand = CMD_FCP_ICMND64_CR; 5041 piocb->ulpContext = ndlp->nlp_rpi; 5042 if (vport->phba->sli_rev == LPFC_SLI_REV4) { 5043 piocb->ulpContext = 5044 vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 5045 } 5046 piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0; 5047 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f); 5048 piocb->ulpPU = 0; 5049 piocb->un.fcpi.fcpi_parm = 0; 5050 5051 /* ulpTimeout is only one byte */ 5052 if (lpfc_cmd->timeout > 0xff) { 5053 /* 5054 * Do not timeout the command at the firmware level. 5055 * The driver will provide the timeout mechanism. 5056 */ 5057 piocb->ulpTimeout = 0; 5058 } else 5059 piocb->ulpTimeout = lpfc_cmd->timeout; 5060 5061 if (vport->phba->sli_rev == LPFC_SLI_REV4) 5062 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd); 5063 5064 return 1; 5065 } 5066 5067 /** 5068 * lpfc_scsi_api_table_setup - Set up scsi api function jump table 5069 * @phba: The hba struct for which this call is being executed. 5070 * @dev_grp: The HBA PCI-Device group number. 5071 * 5072 * This routine sets up the SCSI interface API function jump table in @phba 5073 * struct. 5074 * Returns: 0 - success, -ENODEV - failure. 
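 * LPFC_PCI_DEV_LP selects the SLI-3 (_s3) routines and LPFC_PCI_DEV_OC
 * selects the SLI-4 (_s4) routines; any other device group is rejected.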
5075 **/
5076 int
5077 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5078 {
5079
5080 	phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
5081
5082 	switch (dev_grp) {
5083 	case LPFC_PCI_DEV_LP:
5084 		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
5085 		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
5086 		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
5087 		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
5088 		phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s3;
5089 		break;
5090 	case LPFC_PCI_DEV_OC:
5091 		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
5092 		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
5093 		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
5094 		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
5095 		phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4;
5096 		break;
5097 	default:
5098 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5099 				"1418 Invalid HBA PCI-device group: 0x%x\n",
5100 				dev_grp);
5101 		return -ENODEV;
5102 	}
5103 	phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
5104 	phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
5105 	return 0;
5106 }
5107
5108 /**
5109  * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
5110  * @phba: The Hba for which this call is being executed.
5111  * @cmdiocbq: Pointer to lpfc_iocbq data structure.
5112  * @rspiocbq: Pointer to lpfc_iocbq data structure.
5113  *
5114  * This routine is the IOCB completion routine for device reset and target
5115  * reset commands. It releases the scsi buffer associated with lpfc_cmd.
5116  **/
5117 static void
5118 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
5119 			struct lpfc_iocbq *cmdiocbq,
5120 			struct lpfc_iocbq *rspiocbq)
5121 {
5122 	struct lpfc_io_buf *lpfc_cmd =
5123 		(struct lpfc_io_buf *) cmdiocbq->context1;
5124 	if (lpfc_cmd)
5125 		lpfc_release_scsi_buf(phba, lpfc_cmd);
5126 	return;
5127 }
5128
5129 /**
5130  * lpfc_check_pci_resettable - Walks list of devices on pci_dev's bus to check
5131  *			       if issuing a pci_bus_reset is possibly unsafe
5132  * @phba: lpfc_hba pointer.
5133  *
5134  * Description:
5135  * Walks the bus_list to ensure that only PCI devices with an Emulex
5136  * vendor id, device ids that support hot reset, and at most one
5137  * occurrence of function 0 are present.
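 * Any non-Emulex device, an adapter that is not SLI-4 or is running in
 * FCoE mode, or more than one function 0 on the bus fails the walk with
 * -EBADSLT.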
5138 * 5139 * Returns: 5140 * -EBADSLT, detected invalid device 5141 * 0, successful 5142 */ 5143 int 5144 lpfc_check_pci_resettable(struct lpfc_hba *phba) 5145 { 5146 const struct pci_dev *pdev = phba->pcidev; 5147 struct pci_dev *ptr = NULL; 5148 u8 counter = 0; 5149 5150 /* Walk the list of devices on the pci_dev's bus */ 5151 list_for_each_entry(ptr, &pdev->bus->devices, bus_list) { 5152 /* Check for Emulex Vendor ID */ 5153 if (ptr->vendor != PCI_VENDOR_ID_EMULEX) { 5154 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5155 "8346 Non-Emulex vendor found: " 5156 "0x%04x\n", ptr->vendor); 5157 return -EBADSLT; 5158 } 5159 5160 /* Check for valid Emulex Device ID */ 5161 if (phba->sli_rev != LPFC_SLI_REV4 || 5162 phba->hba_flag & HBA_FCOE_MODE) { 5163 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5164 "8347 Incapable PCI reset device: " 5165 "0x%04x\n", ptr->device); 5166 return -EBADSLT; 5167 } 5168 5169 /* Check for only one function 0 ID to ensure only one HBA on 5170 * secondary bus 5171 */ 5172 if (ptr->devfn == 0) { 5173 if (++counter > 1) { 5174 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5175 "8348 More than one device on " 5176 "secondary bus found\n"); 5177 return -EBADSLT; 5178 } 5179 } 5180 } 5181 5182 return 0; 5183 } 5184 5185 /** 5186 * lpfc_info - Info entry point of scsi_host_template data structure 5187 * @host: The scsi host for which this call is being executed. 5188 * 5189 * This routine provides module information about hba. 5190 * 5191 * Reutrn code: 5192 * Pointer to char - Success. 5193 **/ 5194 const char * 5195 lpfc_info(struct Scsi_Host *host) 5196 { 5197 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata; 5198 struct lpfc_hba *phba = vport->phba; 5199 int link_speed = 0; 5200 static char lpfcinfobuf[384]; 5201 char tmp[384] = {0}; 5202 5203 memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf)); 5204 if (phba && phba->pcidev){ 5205 /* Model Description */ 5206 scnprintf(tmp, sizeof(tmp), phba->ModelDesc); 5207 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >= 5208 sizeof(lpfcinfobuf)) 5209 goto buffer_done; 5210 5211 /* PCI Info */ 5212 scnprintf(tmp, sizeof(tmp), 5213 " on PCI bus %02x device %02x irq %d", 5214 phba->pcidev->bus->number, phba->pcidev->devfn, 5215 phba->pcidev->irq); 5216 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >= 5217 sizeof(lpfcinfobuf)) 5218 goto buffer_done; 5219 5220 /* Port Number */ 5221 if (phba->Port[0]) { 5222 scnprintf(tmp, sizeof(tmp), " port %s", phba->Port); 5223 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >= 5224 sizeof(lpfcinfobuf)) 5225 goto buffer_done; 5226 } 5227 5228 /* Link Speed */ 5229 link_speed = lpfc_sli_port_speed_get(phba); 5230 if (link_speed != 0) { 5231 scnprintf(tmp, sizeof(tmp), 5232 " Logical Link Speed: %d Mbps", link_speed); 5233 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >= 5234 sizeof(lpfcinfobuf)) 5235 goto buffer_done; 5236 } 5237 5238 /* PCI resettable */ 5239 if (!lpfc_check_pci_resettable(phba)) { 5240 scnprintf(tmp, sizeof(tmp), " PCI resettable"); 5241 strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)); 5242 } 5243 } 5244 5245 buffer_done: 5246 return lpfcinfobuf; 5247 } 5248 5249 /** 5250 * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba 5251 * @phba: The Hba for which this call is being executed. 5252 * 5253 * This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo. 5254 * The default value of cfg_poll_tmo is 10 milliseconds. 
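 * The timer is only re-armed while the FCP ring txcmplq still holds
 * pending commands; for example, with the default cfg_poll_tmo the timer
 * is pushed out to jiffies + msecs_to_jiffies(10).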
5255 **/ 5256 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba) 5257 { 5258 unsigned long poll_tmo_expires = 5259 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo)); 5260 5261 if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq)) 5262 mod_timer(&phba->fcp_poll_timer, 5263 poll_tmo_expires); 5264 } 5265 5266 /** 5267 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA 5268 * @phba: The Hba for which this call is being executed. 5269 * 5270 * This routine starts the fcp_poll_timer of @phba. 5271 **/ 5272 void lpfc_poll_start_timer(struct lpfc_hba * phba) 5273 { 5274 lpfc_poll_rearm_timer(phba); 5275 } 5276 5277 /** 5278 * lpfc_poll_timeout - Restart polling timer 5279 * @t: Timer construct where lpfc_hba data structure pointer is obtained. 5280 * 5281 * This routine restarts fcp_poll timer, when FCP ring polling is enable 5282 * and FCP Ring interrupt is disable. 5283 **/ 5284 void lpfc_poll_timeout(struct timer_list *t) 5285 { 5286 struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer); 5287 5288 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 5289 lpfc_sli_handle_fast_ring_event(phba, 5290 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); 5291 5292 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 5293 lpfc_poll_rearm_timer(phba); 5294 } 5295 } 5296 5297 /* 5298 * lpfc_get_vmid_from_hashtable - search the UUID in the hash table 5299 * @vport: The virtual port for which this call is being executed. 5300 * @hash: calculated hash value 5301 * @buf: uuid associated with the VE 5302 * Return the VMID entry associated with the UUID 5303 * Make sure to acquire the appropriate lock before invoking this routine. 5304 */ 5305 struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport, 5306 u32 hash, u8 *buf) 5307 { 5308 struct lpfc_vmid *vmp; 5309 5310 hash_for_each_possible(vport->hash_table, vmp, hnode, hash) { 5311 if (memcmp(&vmp->host_vmid[0], buf, 16) == 0) 5312 return vmp; 5313 } 5314 return NULL; 5315 } 5316 5317 /* 5318 * lpfc_put_vmid_in_hashtable - put the VMID in the hash table 5319 * @vport: The virtual port for which this call is being executed. 5320 * @hash - calculated hash value 5321 * @vmp: Pointer to a VMID entry representing a VM sending I/O 5322 * 5323 * This routine will insert the newly acquired VMID entity in the hash table. 5324 * Make sure to acquire the appropriate lock before invoking this routine. 5325 */ 5326 static void 5327 lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash, 5328 struct lpfc_vmid *vmp) 5329 { 5330 hash_add(vport->hash_table, &vmp->hnode, hash); 5331 } 5332 5333 /* 5334 * lpfc_vmid_hash_fn - create a hash value of the UUID 5335 * @vmid: uuid associated with the VE 5336 * @len: length of the VMID string 5337 * Returns the calculated hash value 5338 */ 5339 int lpfc_vmid_hash_fn(const char *vmid, int len) 5340 { 5341 int c; 5342 int hash = 0; 5343 5344 if (len == 0) 5345 return 0; 5346 while (len--) { 5347 c = *vmid++; 5348 if (c >= 'A' && c <= 'Z') 5349 c += 'a' - 'A'; 5350 5351 hash = (hash + (c << LPFC_VMID_HASH_SHIFT) + 5352 (c >> LPFC_VMID_HASH_SHIFT)) * 19; 5353 } 5354 5355 return hash & LPFC_VMID_HASH_MASK; 5356 } 5357 5358 /* 5359 * lpfc_vmid_update_entry - update the vmid entry in the hash table 5360 * @vport: The virtual port for which this call is being executed. 
5361 * @cmd: address of scsi cmd descriptor 5362 * @vmp: Pointer to a VMID entry representing a VM sending I/O 5363 * @tag: VMID tag 5364 */ 5365 static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd 5366 *cmd, struct lpfc_vmid *vmp, 5367 union lpfc_vmid_io_tag *tag) 5368 { 5369 u64 *lta; 5370 5371 if (vport->vmid_priority_tagging) 5372 tag->cs_ctl_vmid = vmp->un.cs_ctl_vmid; 5373 else 5374 tag->app_id = vmp->un.app_id; 5375 5376 if (cmd->sc_data_direction == DMA_TO_DEVICE) 5377 vmp->io_wr_cnt++; 5378 else 5379 vmp->io_rd_cnt++; 5380 5381 /* update the last access timestamp in the table */ 5382 lta = per_cpu_ptr(vmp->last_io_time, raw_smp_processor_id()); 5383 *lta = jiffies; 5384 } 5385 5386 static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport, 5387 struct lpfc_vmid *vmid) 5388 { 5389 u32 hash; 5390 struct lpfc_vmid *pvmid; 5391 5392 if (vport->port_type == LPFC_PHYSICAL_PORT) { 5393 vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport); 5394 } else { 5395 hash = lpfc_vmid_hash_fn(vmid->host_vmid, vmid->vmid_len); 5396 pvmid = 5397 lpfc_get_vmid_from_hashtable(vport->phba->pport, hash, 5398 vmid->host_vmid); 5399 if (pvmid) 5400 vmid->un.cs_ctl_vmid = pvmid->un.cs_ctl_vmid; 5401 else 5402 vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport); 5403 } 5404 } 5405 5406 /* 5407 * lpfc_vmid_get_appid - get the VMID associated with the UUID 5408 * @vport: The virtual port for which this call is being executed. 5409 * @uuid: UUID associated with the VE 5410 * @cmd: address of scsi_cmd descriptor 5411 * @tag: VMID tag 5412 * Returns status of the function 5413 */ 5414 static int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, struct 5415 scsi_cmnd * cmd, union lpfc_vmid_io_tag *tag) 5416 { 5417 struct lpfc_vmid *vmp = NULL; 5418 int hash, len, rc, i; 5419 5420 /* check if QFPA is complete */ 5421 if (lpfc_vmid_is_type_priority_tag(vport) && !(vport->vmid_flag & 5422 LPFC_VMID_QFPA_CMPL)) { 5423 vport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA; 5424 return -EAGAIN; 5425 } 5426 5427 /* search if the UUID has already been mapped to the VMID */ 5428 len = strlen(uuid); 5429 hash = lpfc_vmid_hash_fn(uuid, len); 5430 5431 /* search for the VMID in the table */ 5432 read_lock(&vport->vmid_lock); 5433 vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid); 5434 5435 /* if found, check if its already registered */ 5436 if (vmp && vmp->flag & LPFC_VMID_REGISTERED) { 5437 read_unlock(&vport->vmid_lock); 5438 lpfc_vmid_update_entry(vport, cmd, vmp, tag); 5439 rc = 0; 5440 } else if (vmp && (vmp->flag & LPFC_VMID_REQ_REGISTER || 5441 vmp->flag & LPFC_VMID_DE_REGISTER)) { 5442 /* else if register or dereg request has already been sent */ 5443 /* Hence VMID tag will not be added for this I/O */ 5444 read_unlock(&vport->vmid_lock); 5445 rc = -EBUSY; 5446 } else { 5447 /* The VMID was not found in the hashtable. 
At this point, */ 5448 /* drop the read lock first before proceeding further */ 5449 read_unlock(&vport->vmid_lock); 5450 /* start the process to obtain one as per the */ 5451 /* type of the VMID indicated */ 5452 write_lock(&vport->vmid_lock); 5453 vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid); 5454 5455 /* while the read lock was released, in case the entry was */ 5456 /* added by other context or is in process of being added */ 5457 if (vmp && vmp->flag & LPFC_VMID_REGISTERED) { 5458 lpfc_vmid_update_entry(vport, cmd, vmp, tag); 5459 write_unlock(&vport->vmid_lock); 5460 return 0; 5461 } else if (vmp && vmp->flag & LPFC_VMID_REQ_REGISTER) { 5462 write_unlock(&vport->vmid_lock); 5463 return -EBUSY; 5464 } 5465 5466 /* else search and allocate a free slot in the hash table */ 5467 if (vport->cur_vmid_cnt < vport->max_vmid) { 5468 for (i = 0; i < vport->max_vmid; i++) { 5469 vmp = vport->vmid + i; 5470 if (vmp->flag == LPFC_VMID_SLOT_FREE) 5471 break; 5472 } 5473 if (i == vport->max_vmid) 5474 vmp = NULL; 5475 } else { 5476 vmp = NULL; 5477 } 5478 5479 if (!vmp) { 5480 write_unlock(&vport->vmid_lock); 5481 return -ENOMEM; 5482 } 5483 5484 /* Add the vmid and register */ 5485 lpfc_put_vmid_in_hashtable(vport, hash, vmp); 5486 vmp->vmid_len = len; 5487 memcpy(vmp->host_vmid, uuid, vmp->vmid_len); 5488 vmp->io_rd_cnt = 0; 5489 vmp->io_wr_cnt = 0; 5490 vmp->flag = LPFC_VMID_SLOT_USED; 5491 5492 vmp->delete_inactive = 5493 vport->vmid_inactivity_timeout ? 1 : 0; 5494 5495 /* if type priority tag, get next available VMID */ 5496 if (lpfc_vmid_is_type_priority_tag(vport)) 5497 lpfc_vmid_assign_cs_ctl(vport, vmp); 5498 5499 /* allocate the per cpu variable for holding */ 5500 /* the last access time stamp only if VMID is enabled */ 5501 if (!vmp->last_io_time) 5502 vmp->last_io_time = __alloc_percpu(sizeof(u64), 5503 __alignof__(struct 5504 lpfc_vmid)); 5505 if (!vmp->last_io_time) { 5506 hash_del(&vmp->hnode); 5507 vmp->flag = LPFC_VMID_SLOT_FREE; 5508 write_unlock(&vport->vmid_lock); 5509 return -EIO; 5510 } 5511 5512 write_unlock(&vport->vmid_lock); 5513 5514 /* complete transaction with switch */ 5515 if (lpfc_vmid_is_type_priority_tag(vport)) 5516 rc = lpfc_vmid_uvem(vport, vmp, true); 5517 else 5518 rc = lpfc_vmid_cmd(vport, SLI_CTAS_RAPP_IDENT, vmp); 5519 if (!rc) { 5520 write_lock(&vport->vmid_lock); 5521 vport->cur_vmid_cnt++; 5522 vmp->flag |= LPFC_VMID_REQ_REGISTER; 5523 write_unlock(&vport->vmid_lock); 5524 } else { 5525 write_lock(&vport->vmid_lock); 5526 hash_del(&vmp->hnode); 5527 vmp->flag = LPFC_VMID_SLOT_FREE; 5528 free_percpu(vmp->last_io_time); 5529 write_unlock(&vport->vmid_lock); 5530 return -EIO; 5531 } 5532 5533 /* finally, enable the idle timer once */ 5534 if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) { 5535 mod_timer(&vport->phba->inactive_vmid_poll, 5536 jiffies + 5537 msecs_to_jiffies(1000 * LPFC_VMID_TIMER)); 5538 vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD; 5539 } 5540 } 5541 return rc; 5542 } 5543 5544 /* 5545 * lpfc_is_command_vm_io - get the UUID from blk cgroup 5546 * @cmd: Pointer to scsi_cmnd data structure 5547 * Returns UUID if present, otherwise NULL 5548 */ 5549 static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd) 5550 { 5551 struct bio *bio = scsi_cmd_to_rq(cmd)->bio; 5552 5553 return bio ? blkcg_get_fc_appid(bio) : NULL; 5554 } 5555 5556 /** 5557 * lpfc_queuecommand - scsi_host_template queuecommand entry point 5558 * @shost: kernel scsi host pointer. 5559 * @cmnd: Pointer to scsi_cmnd data structure. 
5560 * 5561 * Driver registers this routine to scsi midlayer to submit a @cmd to process. 5562 * This routine prepares an IOCB from scsi command and provides to firmware. 5563 * The @done callback is invoked after driver finished processing the command. 5564 * 5565 * Return value : 5566 * 0 - Success 5567 * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily. 5568 **/ 5569 static int 5570 lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) 5571 { 5572 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 5573 struct lpfc_hba *phba = vport->phba; 5574 struct lpfc_rport_data *rdata; 5575 struct lpfc_nodelist *ndlp; 5576 struct lpfc_io_buf *lpfc_cmd; 5577 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 5578 int err, idx; 5579 u8 *uuid = NULL; 5580 uint64_t start; 5581 5582 start = ktime_get_ns(); 5583 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 5584 5585 /* sanity check on references */ 5586 if (unlikely(!rdata) || unlikely(!rport)) 5587 goto out_fail_command; 5588 5589 err = fc_remote_port_chkready(rport); 5590 if (err) { 5591 cmnd->result = err; 5592 goto out_fail_command; 5593 } 5594 ndlp = rdata->pnode; 5595 5596 if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) && 5597 (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) { 5598 5599 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5600 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x" 5601 " op:%02x str=%s without registering for" 5602 " BlockGuard - Rejecting command\n", 5603 cmnd->cmnd[0], scsi_get_prot_op(cmnd), 5604 dif_op_str[scsi_get_prot_op(cmnd)]); 5605 goto out_fail_command; 5606 } 5607 5608 /* 5609 * Catch race where our node has transitioned, but the 5610 * transport is still transitioning. 5611 */ 5612 if (!ndlp) 5613 goto out_tgt_busy1; 5614 5615 /* Check if IO qualifies for CMF */ 5616 if (phba->cmf_active_mode != LPFC_CFG_OFF && 5617 cmnd->sc_data_direction == DMA_FROM_DEVICE && 5618 (scsi_sg_count(cmnd))) { 5619 /* Latency start time saved in rx_cmd_start later in routine */ 5620 err = lpfc_update_cmf_cmd(phba, scsi_bufflen(cmnd)); 5621 if (err) 5622 goto out_tgt_busy1; 5623 } 5624 5625 if (lpfc_ndlp_check_qdepth(phba, ndlp)) { 5626 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) { 5627 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR, 5628 "3377 Target Queue Full, scsi Id:%d " 5629 "Qdepth:%d Pending command:%d" 5630 " WWNN:%02x:%02x:%02x:%02x:" 5631 "%02x:%02x:%02x:%02x, " 5632 " WWPN:%02x:%02x:%02x:%02x:" 5633 "%02x:%02x:%02x:%02x", 5634 ndlp->nlp_sid, ndlp->cmd_qdepth, 5635 atomic_read(&ndlp->cmd_pending), 5636 ndlp->nlp_nodename.u.wwn[0], 5637 ndlp->nlp_nodename.u.wwn[1], 5638 ndlp->nlp_nodename.u.wwn[2], 5639 ndlp->nlp_nodename.u.wwn[3], 5640 ndlp->nlp_nodename.u.wwn[4], 5641 ndlp->nlp_nodename.u.wwn[5], 5642 ndlp->nlp_nodename.u.wwn[6], 5643 ndlp->nlp_nodename.u.wwn[7], 5644 ndlp->nlp_portname.u.wwn[0], 5645 ndlp->nlp_portname.u.wwn[1], 5646 ndlp->nlp_portname.u.wwn[2], 5647 ndlp->nlp_portname.u.wwn[3], 5648 ndlp->nlp_portname.u.wwn[4], 5649 ndlp->nlp_portname.u.wwn[5], 5650 ndlp->nlp_portname.u.wwn[6], 5651 ndlp->nlp_portname.u.wwn[7]); 5652 goto out_tgt_busy2; 5653 } 5654 } 5655 5656 lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd); 5657 if (lpfc_cmd == NULL) { 5658 lpfc_rampdown_queue_depth(phba); 5659 5660 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR, 5661 "0707 driver's buffer pool is empty, " 5662 "IO busied\n"); 5663 goto out_host_busy; 5664 } 5665 lpfc_cmd->rx_cmd_start = start; 5666 5667 /* 5668 * Store the midlayer's 
command structure for the completion phase 5669 * and complete the command initialization. 5670 */ 5671 lpfc_cmd->pCmd = cmnd; 5672 lpfc_cmd->rdata = rdata; 5673 lpfc_cmd->ndlp = ndlp; 5674 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL; 5675 cmnd->host_scribble = (unsigned char *)lpfc_cmd; 5676 5677 err = lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); 5678 if (err) 5679 goto out_host_busy_release_buf; 5680 5681 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { 5682 if (vport->phba->cfg_enable_bg) { 5683 lpfc_printf_vlog(vport, 5684 KERN_INFO, LOG_SCSI_CMD, 5685 "9033 BLKGRD: rcvd %s cmd:x%x " 5686 "reftag x%x cnt %u pt %x\n", 5687 dif_op_str[scsi_get_prot_op(cmnd)], 5688 cmnd->cmnd[0], 5689 scsi_prot_ref_tag(cmnd), 5690 scsi_logical_block_count(cmnd), 5691 (cmnd->cmnd[1]>>5)); 5692 } 5693 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 5694 } else { 5695 if (vport->phba->cfg_enable_bg) { 5696 lpfc_printf_vlog(vport, 5697 KERN_INFO, LOG_SCSI_CMD, 5698 "9038 BLKGRD: rcvd PROT_NORMAL cmd: " 5699 "x%x reftag x%x cnt %u pt %x\n", 5700 cmnd->cmnd[0], 5701 scsi_prot_ref_tag(cmnd), 5702 scsi_logical_block_count(cmnd), 5703 (cmnd->cmnd[1]>>5)); 5704 } 5705 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); 5706 } 5707 5708 if (unlikely(err)) { 5709 if (err == 2) { 5710 cmnd->result = DID_ERROR << 16; 5711 goto out_fail_command_release_buf; 5712 } 5713 goto out_host_busy_free_buf; 5714 } 5715 5716 5717 /* check the necessary and sufficient condition to support VMID */ 5718 if (lpfc_is_vmid_enabled(phba) && 5719 (ndlp->vmid_support || 5720 phba->pport->vmid_priority_tagging == 5721 LPFC_VMID_PRIO_TAG_ALL_TARGETS)) { 5722 /* is the I/O generated by a VM, get the associated virtual */ 5723 /* entity id */ 5724 uuid = lpfc_is_command_vm_io(cmnd); 5725 5726 if (uuid) { 5727 err = lpfc_vmid_get_appid(vport, uuid, cmnd, 5728 (union lpfc_vmid_io_tag *) 5729 &lpfc_cmd->cur_iocbq.vmid_tag); 5730 if (!err) 5731 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_VMID; 5732 } 5733 } 5734 5735 atomic_inc(&ndlp->cmd_pending); 5736 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 5737 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) 5738 this_cpu_inc(phba->sli4_hba.c_stat->xmt_io); 5739 #endif 5740 /* Issue I/O to adapter */ 5741 err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING, 5742 &lpfc_cmd->cur_iocbq, 5743 SLI_IOCB_RET_IOCB); 5744 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 5745 if (start) { 5746 lpfc_cmd->ts_cmd_start = start; 5747 lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd; 5748 lpfc_cmd->ts_cmd_wqput = ktime_get_ns(); 5749 } else { 5750 lpfc_cmd->ts_cmd_start = 0; 5751 } 5752 #endif 5753 if (err) { 5754 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5755 "3376 FCP could not issue IOCB err %x " 5756 "FCP cmd x%x <%d/%llu> " 5757 "sid: x%x did: x%x oxid: x%x " 5758 "Data: x%x x%x x%x x%x\n", 5759 err, cmnd->cmnd[0], 5760 cmnd->device ? cmnd->device->id : 0xffff, 5761 cmnd->device ? cmnd->device->lun : (u64)-1, 5762 vport->fc_myDID, ndlp->nlp_DID, 5763 phba->sli_rev == LPFC_SLI_REV4 ? 5764 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff, 5765 phba->sli_rev == LPFC_SLI_REV4 ? 5766 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] : 5767 lpfc_cmd->cur_iocbq.iocb.ulpContext, 5768 lpfc_cmd->cur_iocbq.iotag, 5769 phba->sli_rev == LPFC_SLI_REV4 ? 
5770 bf_get(wqe_tmo, 5771 &lpfc_cmd->cur_iocbq.wqe.generic.wqe_com) : 5772 lpfc_cmd->cur_iocbq.iocb.ulpTimeout, 5773 (uint32_t)(scsi_cmd_to_rq(cmnd)->timeout / 1000)); 5774 5775 goto out_host_busy_free_buf; 5776 } 5777 5778 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 5779 lpfc_sli_handle_fast_ring_event(phba, 5780 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); 5781 5782 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 5783 lpfc_poll_rearm_timer(phba); 5784 } 5785 5786 if (phba->cfg_xri_rebalancing) 5787 lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no); 5788 5789 return 0; 5790 5791 out_host_busy_free_buf: 5792 idx = lpfc_cmd->hdwq_no; 5793 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 5794 if (phba->sli4_hba.hdwq) { 5795 switch (lpfc_cmd->fcp_cmnd->fcpCntl3) { 5796 case WRITE_DATA: 5797 phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--; 5798 break; 5799 case READ_DATA: 5800 phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--; 5801 break; 5802 default: 5803 phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--; 5804 } 5805 } 5806 out_host_busy_release_buf: 5807 lpfc_release_scsi_buf(phba, lpfc_cmd); 5808 out_host_busy: 5809 lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd), 5810 shost); 5811 return SCSI_MLQUEUE_HOST_BUSY; 5812 5813 out_tgt_busy2: 5814 lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd), 5815 shost); 5816 out_tgt_busy1: 5817 return SCSI_MLQUEUE_TARGET_BUSY; 5818 5819 out_fail_command_release_buf: 5820 lpfc_release_scsi_buf(phba, lpfc_cmd); 5821 lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd), 5822 shost); 5823 5824 out_fail_command: 5825 cmnd->scsi_done(cmnd); 5826 return 0; 5827 } 5828 5829 /* 5830 * lpfc_vmid_vport_cleanup - cleans up the resources associated with a vport 5831 * @vport: The virtual port for which this call is being executed. 5832 */ 5833 void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport) 5834 { 5835 u32 bucket; 5836 struct lpfc_vmid *cur; 5837 5838 if (vport->port_type == LPFC_PHYSICAL_PORT) 5839 del_timer_sync(&vport->phba->inactive_vmid_poll); 5840 5841 kfree(vport->qfpa_res); 5842 kfree(vport->vmid_priority.vmid_range); 5843 kfree(vport->vmid); 5844 5845 if (!hash_empty(vport->hash_table)) 5846 hash_for_each(vport->hash_table, bucket, cur, hnode) 5847 hash_del(&cur->hnode); 5848 5849 vport->qfpa_res = NULL; 5850 vport->vmid_priority.vmid_range = NULL; 5851 vport->vmid = NULL; 5852 vport->cur_vmid_cnt = 0; 5853 } 5854 5855 /** 5856 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point 5857 * @cmnd: Pointer to scsi_cmnd data structure. 5858 * 5859 * This routine aborts @cmnd pending in base driver. 
5860 * 5861 * Return code : 5862 * 0x2003 - Error 5863 * 0x2002 - Success 5864 **/ 5865 static int 5866 lpfc_abort_handler(struct scsi_cmnd *cmnd) 5867 { 5868 struct Scsi_Host *shost = cmnd->device->host; 5869 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 5870 struct lpfc_hba *phba = vport->phba; 5871 struct lpfc_iocbq *iocb; 5872 struct lpfc_io_buf *lpfc_cmd; 5873 int ret = SUCCESS, status = 0; 5874 struct lpfc_sli_ring *pring_s4 = NULL; 5875 struct lpfc_sli_ring *pring = NULL; 5876 int ret_val; 5877 unsigned long flags; 5878 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); 5879 5880 status = fc_block_scsi_eh(cmnd); 5881 if (status != 0 && status != SUCCESS) 5882 return status; 5883 5884 lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble; 5885 if (!lpfc_cmd) 5886 return ret; 5887 5888 spin_lock_irqsave(&phba->hbalock, flags); 5889 /* driver queued commands are in process of being flushed */ 5890 if (phba->hba_flag & HBA_IOQ_FLUSH) { 5891 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5892 "3168 SCSI Layer abort requested I/O has been " 5893 "flushed by LLD.\n"); 5894 ret = FAILED; 5895 goto out_unlock; 5896 } 5897 5898 /* Guard against IO completion being called at same time */ 5899 spin_lock(&lpfc_cmd->buf_lock); 5900 5901 if (!lpfc_cmd->pCmd) { 5902 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5903 "2873 SCSI Layer I/O Abort Request IO CMPL Status " 5904 "x%x ID %d LUN %llu\n", 5905 SUCCESS, cmnd->device->id, cmnd->device->lun); 5906 goto out_unlock_buf; 5907 } 5908 5909 iocb = &lpfc_cmd->cur_iocbq; 5910 if (phba->sli_rev == LPFC_SLI_REV4) { 5911 pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring; 5912 if (!pring_s4) { 5913 ret = FAILED; 5914 goto out_unlock_buf; 5915 } 5916 spin_lock(&pring_s4->ring_lock); 5917 } 5918 /* the command is in process of being cancelled */ 5919 if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { 5920 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5921 "3169 SCSI Layer abort requested I/O has been " 5922 "cancelled by LLD.\n"); 5923 ret = FAILED; 5924 goto out_unlock_ring; 5925 } 5926 /* 5927 * If pCmd field of the corresponding lpfc_io_buf structure 5928 * points to a different SCSI command, then the driver has 5929 * already completed this command, but the midlayer did not 5930 * see the completion before the eh fired. Just return SUCCESS. 5931 */ 5932 if (lpfc_cmd->pCmd != cmnd) { 5933 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5934 "3170 SCSI Layer abort requested I/O has been " 5935 "completed by LLD.\n"); 5936 goto out_unlock_ring; 5937 } 5938 5939 BUG_ON(iocb->context1 != lpfc_cmd); 5940 5941 /* abort issued in recovery is still in progress */ 5942 if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) { 5943 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5944 "3389 SCSI Layer I/O Abort Request is pending\n"); 5945 if (phba->sli_rev == LPFC_SLI_REV4) 5946 spin_unlock(&pring_s4->ring_lock); 5947 spin_unlock(&lpfc_cmd->buf_lock); 5948 spin_unlock_irqrestore(&phba->hbalock, flags); 5949 goto wait_for_cmpl; 5950 } 5951 5952 lpfc_cmd->waitq = &waitq; 5953 if (phba->sli_rev == LPFC_SLI_REV4) { 5954 spin_unlock(&pring_s4->ring_lock); 5955 ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb, 5956 lpfc_sli4_abort_fcp_cmpl); 5957 } else { 5958 pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; 5959 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb, 5960 lpfc_sli_abort_fcp_cmpl); 5961 } 5962 5963 /* Make sure HBA is alive */ 5964 lpfc_issue_hb_tmo(phba); 5965 5966 if (ret_val != IOCB_SUCCESS) { 5967 /* Indicate the IO is not being aborted by the driver. 
*/ 5968 lpfc_cmd->waitq = NULL; 5969 spin_unlock(&lpfc_cmd->buf_lock); 5970 spin_unlock_irqrestore(&phba->hbalock, flags); 5971 ret = FAILED; 5972 goto out; 5973 } 5974 5975 /* no longer need the lock after this point */ 5976 spin_unlock(&lpfc_cmd->buf_lock); 5977 spin_unlock_irqrestore(&phba->hbalock, flags); 5978 5979 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 5980 lpfc_sli_handle_fast_ring_event(phba, 5981 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); 5982 5983 wait_for_cmpl: 5984 /* 5985 * iocb_flag is set to LPFC_DRIVER_ABORTED before we wait 5986 * for abort to complete. 5987 */ 5988 wait_event_timeout(waitq, 5989 (lpfc_cmd->pCmd != cmnd), 5990 msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000)); 5991 5992 spin_lock(&lpfc_cmd->buf_lock); 5993 5994 if (lpfc_cmd->pCmd == cmnd) { 5995 ret = FAILED; 5996 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5997 "0748 abort handler timed out waiting " 5998 "for aborting I/O (xri:x%x) to complete: " 5999 "ret %#x, ID %d, LUN %llu\n", 6000 iocb->sli4_xritag, ret, 6001 cmnd->device->id, cmnd->device->lun); 6002 } 6003 6004 lpfc_cmd->waitq = NULL; 6005 6006 spin_unlock(&lpfc_cmd->buf_lock); 6007 goto out; 6008 6009 out_unlock_ring: 6010 if (phba->sli_rev == LPFC_SLI_REV4) 6011 spin_unlock(&pring_s4->ring_lock); 6012 out_unlock_buf: 6013 spin_unlock(&lpfc_cmd->buf_lock); 6014 out_unlock: 6015 spin_unlock_irqrestore(&phba->hbalock, flags); 6016 out: 6017 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 6018 "0749 SCSI Layer I/O Abort Request Status x%x ID %d " 6019 "LUN %llu\n", ret, cmnd->device->id, 6020 cmnd->device->lun); 6021 return ret; 6022 } 6023 6024 static char * 6025 lpfc_taskmgmt_name(uint8_t task_mgmt_cmd) 6026 { 6027 switch (task_mgmt_cmd) { 6028 case FCP_ABORT_TASK_SET: 6029 return "ABORT_TASK_SET"; 6030 case FCP_CLEAR_TASK_SET: 6031 return "FCP_CLEAR_TASK_SET"; 6032 case FCP_BUS_RESET: 6033 return "FCP_BUS_RESET"; 6034 case FCP_LUN_RESET: 6035 return "FCP_LUN_RESET"; 6036 case FCP_TARGET_RESET: 6037 return "FCP_TARGET_RESET"; 6038 case FCP_CLEAR_ACA: 6039 return "FCP_CLEAR_ACA"; 6040 case FCP_TERMINATE_TASK: 6041 return "FCP_TERMINATE_TASK"; 6042 default: 6043 return "unknown"; 6044 } 6045 } 6046 6047 6048 /** 6049 * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed 6050 * @vport: The virtual port for which this call is being executed. 6051 * @lpfc_cmd: Pointer to lpfc_io_buf data structure. 6052 * 6053 * This routine checks the FCP RSP INFO to see if the tsk mgmt command succeded 6054 * 6055 * Return code : 6056 * 0x2003 - Error 6057 * 0x2002 - Success 6058 **/ 6059 static int 6060 lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd) 6061 { 6062 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; 6063 uint32_t rsp_info; 6064 uint32_t rsp_len; 6065 uint8_t rsp_info_code; 6066 int ret = FAILED; 6067 6068 6069 if (fcprsp == NULL) 6070 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 6071 "0703 fcp_rsp is missing\n"); 6072 else { 6073 rsp_info = fcprsp->rspStatus2; 6074 rsp_len = be32_to_cpu(fcprsp->rspRspLen); 6075 rsp_info_code = fcprsp->rspInfo3; 6076 6077 6078 lpfc_printf_vlog(vport, KERN_INFO, 6079 LOG_FCP, 6080 "0706 fcp_rsp valid 0x%x," 6081 " rsp len=%d code 0x%x\n", 6082 rsp_info, 6083 rsp_len, rsp_info_code); 6084 6085 /* If FCP_RSP_LEN_VALID bit is one, then the FCP_RSP_LEN 6086 * field specifies the number of valid bytes of FCP_RSP_INFO. 
6087 * The FCP_RSP_LEN field shall be set to 0x04 or 0x08 6088 */ 6089 if ((fcprsp->rspStatus2 & RSP_LEN_VALID) && 6090 ((rsp_len == 8) || (rsp_len == 4))) { 6091 switch (rsp_info_code) { 6092 case RSP_NO_FAILURE: 6093 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 6094 "0715 Task Mgmt No Failure\n"); 6095 ret = SUCCESS; 6096 break; 6097 case RSP_TM_NOT_SUPPORTED: /* TM rejected */ 6098 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 6099 "0716 Task Mgmt Target " 6100 "reject\n"); 6101 break; 6102 case RSP_TM_NOT_COMPLETED: /* TM failed */ 6103 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 6104 "0717 Task Mgmt Target " 6105 "failed TM\n"); 6106 break; 6107 case RSP_TM_INVALID_LU: /* TM to invalid LU! */ 6108 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 6109 "0718 Task Mgmt to invalid " 6110 "LUN\n"); 6111 break; 6112 } 6113 } 6114 } 6115 return ret; 6116 } 6117 6118 6119 /** 6120 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler 6121 * @vport: The virtual port for which this call is being executed. 6122 * @cmnd: Pointer to scsi_cmnd data structure. 6123 * @tgt_id: Target ID of remote device. 6124 * @lun_id: Lun number for the TMF 6125 * @task_mgmt_cmd: type of TMF to send 6126 * 6127 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to 6128 * a remote port. 6129 * 6130 * Return Code: 6131 * 0x2003 - Error 6132 * 0x2002 - Success. 6133 **/ 6134 static int 6135 lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd, 6136 unsigned int tgt_id, uint64_t lun_id, 6137 uint8_t task_mgmt_cmd) 6138 { 6139 struct lpfc_hba *phba = vport->phba; 6140 struct lpfc_io_buf *lpfc_cmd; 6141 struct lpfc_iocbq *iocbq; 6142 struct lpfc_iocbq *iocbqrsp; 6143 struct lpfc_rport_data *rdata; 6144 struct lpfc_nodelist *pnode; 6145 int ret; 6146 int status; 6147 6148 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 6149 if (!rdata || !rdata->pnode) 6150 return FAILED; 6151 pnode = rdata->pnode; 6152 6153 lpfc_cmd = lpfc_get_scsi_buf(phba, pnode, NULL); 6154 if (lpfc_cmd == NULL) 6155 return FAILED; 6156 lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo; 6157 lpfc_cmd->rdata = rdata; 6158 lpfc_cmd->pCmd = cmnd; 6159 lpfc_cmd->ndlp = pnode; 6160 6161 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id, 6162 task_mgmt_cmd); 6163 if (!status) { 6164 lpfc_release_scsi_buf(phba, lpfc_cmd); 6165 return FAILED; 6166 } 6167 6168 iocbq = &lpfc_cmd->cur_iocbq; 6169 iocbqrsp = lpfc_sli_get_iocbq(phba); 6170 if (iocbqrsp == NULL) { 6171 lpfc_release_scsi_buf(phba, lpfc_cmd); 6172 return FAILED; 6173 } 6174 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; 6175 6176 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 6177 "0702 Issue %s to TGT %d LUN %llu " 6178 "rpi x%x nlp_flag x%x Data: x%x x%x\n", 6179 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id, 6180 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag, 6181 iocbq->iocb_flag); 6182 6183 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, 6184 iocbq, iocbqrsp, lpfc_cmd->timeout); 6185 if ((status != IOCB_SUCCESS) || 6186 (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) { 6187 if (status != IOCB_SUCCESS || 6188 iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR) 6189 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6190 "0727 TMF %s to TGT %d LUN %llu " 6191 "failed (%d, %d) iocb_flag x%x\n", 6192 lpfc_taskmgmt_name(task_mgmt_cmd), 6193 tgt_id, lun_id, 6194 iocbqrsp->iocb.ulpStatus, 6195 iocbqrsp->iocb.un.ulpWord[4], 6196 iocbq->iocb_flag); 6197 /* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */ 6198 if (status == IOCB_SUCCESS) { 6199 if 
(iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) 6200 /* Something in the FCP_RSP was invalid. 6201 * Check conditions */ 6202 ret = lpfc_check_fcp_rsp(vport, lpfc_cmd); 6203 else 6204 ret = FAILED; 6205 } else if (status == IOCB_TIMEDOUT) { 6206 ret = TIMEOUT_ERROR; 6207 } else { 6208 ret = FAILED; 6209 } 6210 } else 6211 ret = SUCCESS; 6212 6213 lpfc_sli_release_iocbq(phba, iocbqrsp); 6214 6215 if (ret != TIMEOUT_ERROR) 6216 lpfc_release_scsi_buf(phba, lpfc_cmd); 6217 6218 return ret; 6219 } 6220 6221 /** 6222 * lpfc_chk_tgt_mapped - Check whether the scsi target is in a MAPPED state 6223 * @vport: The virtual port to check on 6224 * @cmnd: Pointer to scsi_cmnd data structure. 6225 * 6226 * This routine delays until the scsi target (aka rport) for the 6227 * command exists (is present and logged in) or we declare it non-existent. 6228 * 6229 * Return code : 6230 * 0x2003 - Error 6231 * 0x2002 - Success 6232 **/ 6233 static int 6234 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd) 6235 { 6236 struct lpfc_rport_data *rdata; 6237 struct lpfc_nodelist *pnode; 6238 unsigned long later; 6239 6240 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 6241 if (!rdata) { 6242 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 6243 "0797 Tgt Map rport failure: rdata x%px\n", rdata); 6244 return FAILED; 6245 } 6246 pnode = rdata->pnode; 6247 /* 6248 * If target is not in a MAPPED state, delay until 6249 * target is rediscovered or devloss timeout expires. 6250 */ 6251 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; 6252 while (time_after(later, jiffies)) { 6253 if (!pnode) 6254 return FAILED; 6255 if (pnode->nlp_state == NLP_STE_MAPPED_NODE) 6256 return SUCCESS; 6257 schedule_timeout_uninterruptible(msecs_to_jiffies(500)); 6258 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 6259 if (!rdata) 6260 return FAILED; 6261 pnode = rdata->pnode; 6262 } 6263 if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) 6264 return FAILED; 6265 return SUCCESS; 6266 } 6267 6268 /** 6269 * lpfc_reset_flush_io_context - Flush I/O contexts left outstanding after a reset TMF 6270 * @vport: The virtual port (scsi_host) for the flush context 6271 * @tgt_id: If aborting by Target context - specifies the target id 6272 * @lun_id: If aborting by Lun context - specifies the lun id 6273 * @context: specifies the context level to flush at. 6274 * 6275 * After a reset condition via TMF, we need to flush orphaned i/o 6276 * contexts from the adapter. This routine aborts any contexts 6277 * outstanding, then waits for their completions. The wait is 6278 * bounded by devloss_tmo though. 6279 * 6280 * Return code : 6281 * 0x2003 - Error 6282 * 0x2002 - Success 6283 **/ 6284 static int 6285 lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id, 6286 uint64_t lun_id, lpfc_ctx_cmd context) 6287 { 6288 struct lpfc_hba *phba = vport->phba; 6289 unsigned long later; 6290 int cnt; 6291 6292 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); 6293 if (cnt) 6294 lpfc_sli_abort_taskmgmt(vport, 6295 &phba->sli.sli3_ring[LPFC_FCP_RING], 6296 tgt_id, lun_id, context); 6297 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; 6298 while (time_after(later, jiffies) && cnt) { 6299 schedule_timeout_uninterruptible(msecs_to_jiffies(20)); 6300 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); 6301 } 6302 if (cnt) { 6303 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6304 "0724 I/O flush failure for context %s : cnt x%x\n", 6305 ((context == LPFC_CTX_LUN) ? "LUN" : 6306 ((context == LPFC_CTX_TGT) ? "TGT" : 6307 ((context == LPFC_CTX_HOST) ?
"HOST" : "Unknown"))), 6308 cnt); 6309 return FAILED; 6310 } 6311 return SUCCESS; 6312 } 6313 6314 /** 6315 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point 6316 * @cmnd: Pointer to scsi_cmnd data structure. 6317 * 6318 * This routine does a device reset by sending a LUN_RESET task management 6319 * command. 6320 * 6321 * Return code : 6322 * 0x2003 - Error 6323 * 0x2002 - Success 6324 **/ 6325 static int 6326 lpfc_device_reset_handler(struct scsi_cmnd *cmnd) 6327 { 6328 struct Scsi_Host *shost = cmnd->device->host; 6329 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6330 struct lpfc_rport_data *rdata; 6331 struct lpfc_nodelist *pnode; 6332 unsigned tgt_id = cmnd->device->id; 6333 uint64_t lun_id = cmnd->device->lun; 6334 struct lpfc_scsi_event_header scsi_event; 6335 int status; 6336 u32 logit = LOG_FCP; 6337 6338 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 6339 if (!rdata || !rdata->pnode) { 6340 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6341 "0798 Device Reset rdata failure: rdata x%px\n", 6342 rdata); 6343 return FAILED; 6344 } 6345 pnode = rdata->pnode; 6346 status = fc_block_scsi_eh(cmnd); 6347 if (status != 0 && status != SUCCESS) 6348 return status; 6349 6350 status = lpfc_chk_tgt_mapped(vport, cmnd); 6351 if (status == FAILED) { 6352 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6353 "0721 Device Reset rport failure: rdata x%px\n", rdata); 6354 return FAILED; 6355 } 6356 6357 scsi_event.event_type = FC_REG_SCSI_EVENT; 6358 scsi_event.subcategory = LPFC_EVENT_LUNRESET; 6359 scsi_event.lun = lun_id; 6360 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); 6361 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); 6362 6363 fc_host_post_vendor_event(shost, fc_get_event_number(), 6364 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 6365 6366 status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id, 6367 FCP_LUN_RESET); 6368 if (status != SUCCESS) 6369 logit = LOG_TRACE_EVENT; 6370 6371 lpfc_printf_vlog(vport, KERN_ERR, logit, 6372 "0713 SCSI layer issued Device Reset (%d, %llu) " 6373 "return x%x\n", tgt_id, lun_id, status); 6374 6375 /* 6376 * We have to clean up i/o as : they may be orphaned by the TMF; 6377 * or if the TMF failed, they may be in an indeterminate state. 6378 * So, continue on. 6379 * We will report success if all the i/o aborts successfully. 6380 */ 6381 if (status == SUCCESS) 6382 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, 6383 LPFC_CTX_LUN); 6384 6385 return status; 6386 } 6387 6388 /** 6389 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point 6390 * @cmnd: Pointer to scsi_cmnd data structure. 6391 * 6392 * This routine does a target reset by sending a TARGET_RESET task management 6393 * command. 
6394 * 6395 * Return code : 6396 * 0x2003 - Error 6397 * 0x2002 - Success 6398 **/ 6399 static int 6400 lpfc_target_reset_handler(struct scsi_cmnd *cmnd) 6401 { 6402 struct Scsi_Host *shost = cmnd->device->host; 6403 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6404 struct lpfc_rport_data *rdata; 6405 struct lpfc_nodelist *pnode; 6406 unsigned tgt_id = cmnd->device->id; 6407 uint64_t lun_id = cmnd->device->lun; 6408 struct lpfc_scsi_event_header scsi_event; 6409 int status; 6410 u32 logit = LOG_FCP; 6411 u32 dev_loss_tmo = vport->cfg_devloss_tmo; 6412 unsigned long flags; 6413 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); 6414 6415 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 6416 if (!rdata || !rdata->pnode) { 6417 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6418 "0799 Target Reset rdata failure: rdata x%px\n", 6419 rdata); 6420 return FAILED; 6421 } 6422 pnode = rdata->pnode; 6423 status = fc_block_scsi_eh(cmnd); 6424 if (status != 0 && status != SUCCESS) 6425 return status; 6426 6427 status = lpfc_chk_tgt_mapped(vport, cmnd); 6428 if (status == FAILED) { 6429 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6430 "0722 Target Reset rport failure: rdata x%px\n", rdata); 6431 if (pnode) { 6432 spin_lock_irqsave(&pnode->lock, flags); 6433 pnode->nlp_flag &= ~NLP_NPR_ADISC; 6434 pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 6435 spin_unlock_irqrestore(&pnode->lock, flags); 6436 } 6437 lpfc_reset_flush_io_context(vport, tgt_id, lun_id, 6438 LPFC_CTX_TGT); 6439 return FAST_IO_FAIL; 6440 } 6441 6442 scsi_event.event_type = FC_REG_SCSI_EVENT; 6443 scsi_event.subcategory = LPFC_EVENT_TGTRESET; 6444 scsi_event.lun = 0; 6445 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); 6446 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); 6447 6448 fc_host_post_vendor_event(shost, fc_get_event_number(), 6449 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 6450 6451 status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id, 6452 FCP_TARGET_RESET); 6453 if (status != SUCCESS) { 6454 logit = LOG_TRACE_EVENT; 6455 6456 /* Issue LOGO, if no LOGO is outstanding */ 6457 spin_lock_irqsave(&pnode->lock, flags); 6458 if (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO) && 6459 !pnode->logo_waitq) { 6460 pnode->logo_waitq = &waitq; 6461 pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 6462 pnode->nlp_flag |= NLP_ISSUE_LOGO; 6463 pnode->upcall_flags |= NLP_WAIT_FOR_LOGO; 6464 spin_unlock_irqrestore(&pnode->lock, flags); 6465 lpfc_unreg_rpi(vport, pnode); 6466 wait_event_timeout(waitq, 6467 (!(pnode->upcall_flags & 6468 NLP_WAIT_FOR_LOGO)), 6469 msecs_to_jiffies(dev_loss_tmo * 6470 1000)); 6471 6472 if (pnode->upcall_flags & NLP_WAIT_FOR_LOGO) { 6473 lpfc_printf_vlog(vport, KERN_ERR, logit, 6474 "0725 SCSI layer TGTRST " 6475 "failed & LOGO TMO (%d, %llu) " 6476 "return x%x\n", 6477 tgt_id, lun_id, status); 6478 spin_lock_irqsave(&pnode->lock, flags); 6479 pnode->upcall_flags &= ~NLP_WAIT_FOR_LOGO; 6480 } else { 6481 spin_lock_irqsave(&pnode->lock, flags); 6482 } 6483 pnode->logo_waitq = NULL; 6484 spin_unlock_irqrestore(&pnode->lock, flags); 6485 status = SUCCESS; 6486 6487 } else { 6488 spin_unlock_irqrestore(&pnode->lock, flags); 6489 status = FAILED; 6490 } 6491 } 6492 6493 lpfc_printf_vlog(vport, KERN_ERR, logit, 6494 "0723 SCSI layer issued Target Reset (%d, %llu) " 6495 "return x%x\n", tgt_id, lun_id, status); 6496 6497 /* 6498 * We have to clean up i/o as : they may be orphaned by the TMF; 6499 * or if the TMF failed, they may be in an 
indeterminate state. 6500 * So, continue on. 6501 * We will report success if all the i/o aborts successfully. 6502 */ 6503 if (status == SUCCESS) 6504 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, 6505 LPFC_CTX_TGT); 6506 return status; 6507 } 6508 6509 /** 6510 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point 6511 * @cmnd: Pointer to scsi_cmnd data structure. 6512 * 6513 * This routine does target reset to all targets on @cmnd->device->host. 6514 * This emulates Parallel SCSI Bus Reset Semantics. 6515 * 6516 * Return code : 6517 * 0x2003 - Error 6518 * 0x2002 - Success 6519 **/ 6520 static int 6521 lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) 6522 { 6523 struct Scsi_Host *shost = cmnd->device->host; 6524 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6525 struct lpfc_nodelist *ndlp = NULL; 6526 struct lpfc_scsi_event_header scsi_event; 6527 int match; 6528 int ret = SUCCESS, status, i; 6529 u32 logit = LOG_FCP; 6530 6531 scsi_event.event_type = FC_REG_SCSI_EVENT; 6532 scsi_event.subcategory = LPFC_EVENT_BUSRESET; 6533 scsi_event.lun = 0; 6534 memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name)); 6535 memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name)); 6536 6537 fc_host_post_vendor_event(shost, fc_get_event_number(), 6538 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 6539 6540 status = fc_block_scsi_eh(cmnd); 6541 if (status != 0 && status != SUCCESS) 6542 return status; 6543 6544 /* 6545 * Since the driver manages a single bus device, reset all 6546 * targets known to the driver. Should any target reset 6547 * fail, this routine returns failure to the midlayer. 6548 */ 6549 for (i = 0; i < LPFC_MAX_TARGET; i++) { 6550 /* Search for mapped node by target ID */ 6551 match = 0; 6552 spin_lock_irq(shost->host_lock); 6553 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 6554 6555 if (vport->phba->cfg_fcp2_no_tgt_reset && 6556 (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE)) 6557 continue; 6558 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && 6559 ndlp->nlp_sid == i && 6560 ndlp->rport && 6561 ndlp->nlp_type & NLP_FCP_TARGET) { 6562 match = 1; 6563 break; 6564 } 6565 } 6566 spin_unlock_irq(shost->host_lock); 6567 if (!match) 6568 continue; 6569 6570 status = lpfc_send_taskmgmt(vport, cmnd, 6571 i, 0, FCP_TARGET_RESET); 6572 6573 if (status != SUCCESS) { 6574 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6575 "0700 Bus Reset on target %d failed\n", 6576 i); 6577 ret = FAILED; 6578 } 6579 } 6580 /* 6581 * We have to clean up i/o as : they may be orphaned by the TMFs 6582 * above; or if any of the TMFs failed, they may be in an 6583 * indeterminate state. 6584 * We will report success if all the i/o aborts successfully. 6585 */ 6586 6587 status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST); 6588 if (status != SUCCESS) 6589 ret = FAILED; 6590 if (ret == FAILED) 6591 logit = LOG_TRACE_EVENT; 6592 6593 lpfc_printf_vlog(vport, KERN_ERR, logit, 6594 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret); 6595 return ret; 6596 } 6597 6598 /** 6599 * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt 6600 * @cmnd: Pointer to scsi_cmnd data structure. 6601 * 6602 * This routine does host reset to the adaptor port. It brings the HBA 6603 * offline, performs a board restart, and then brings the board back online. 
6604 * The lpfc_offline calls lpfc_sli_hba_down which will abort and local 6605 * reject all outstanding SCSI commands to the host and error returned 6606 * back to SCSI mid-level. As this will be SCSI mid-level's last resort 6607 * of error handling, it will only return error if resetting of the adapter 6608 * is not successful; in all other cases, will return success. 6609 * 6610 * Return code : 6611 * 0x2003 - Error 6612 * 0x2002 - Success 6613 **/ 6614 static int 6615 lpfc_host_reset_handler(struct scsi_cmnd *cmnd) 6616 { 6617 struct Scsi_Host *shost = cmnd->device->host; 6618 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6619 struct lpfc_hba *phba = vport->phba; 6620 int rc, ret = SUCCESS; 6621 6622 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 6623 "3172 SCSI layer issued Host Reset Data:\n"); 6624 6625 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 6626 lpfc_offline(phba); 6627 rc = lpfc_sli_brdrestart(phba); 6628 if (rc) 6629 goto error; 6630 6631 rc = lpfc_online(phba); 6632 if (rc) 6633 goto error; 6634 6635 lpfc_unblock_mgmt_io(phba); 6636 6637 return ret; 6638 error: 6639 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6640 "3323 Failed host reset\n"); 6641 lpfc_unblock_mgmt_io(phba); 6642 return FAILED; 6643 } 6644 6645 /** 6646 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point 6647 * @sdev: Pointer to scsi_device. 6648 * 6649 * This routine populates the cmds_per_lun count + 2 scsi_bufs into this host's 6650 * globally available list of scsi buffers. This routine also makes sure scsi 6651 * buffer is not allocated more than HBA limit conveyed to midlayer. This list 6652 * of scsi buffer exists for the lifetime of the driver. 6653 * 6654 * Return codes: 6655 * non-0 - Error 6656 * 0 - Success 6657 **/ 6658 static int 6659 lpfc_slave_alloc(struct scsi_device *sdev) 6660 { 6661 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 6662 struct lpfc_hba *phba = vport->phba; 6663 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 6664 uint32_t total = 0; 6665 uint32_t num_to_alloc = 0; 6666 int num_allocated = 0; 6667 uint32_t sdev_cnt; 6668 struct lpfc_device_data *device_data; 6669 unsigned long flags; 6670 struct lpfc_name target_wwpn; 6671 6672 if (!rport || fc_remote_port_chkready(rport)) 6673 return -ENXIO; 6674 6675 if (phba->cfg_fof) { 6676 6677 /* 6678 * Check to see if the device data structure for the lun 6679 * exists. If not, create one. 
6680 */ 6681 6682 u64_to_wwn(rport->port_name, target_wwpn.u.wwn); 6683 spin_lock_irqsave(&phba->devicelock, flags); 6684 device_data = __lpfc_get_device_data(phba, 6685 &phba->luns, 6686 &vport->fc_portname, 6687 &target_wwpn, 6688 sdev->lun); 6689 if (!device_data) { 6690 spin_unlock_irqrestore(&phba->devicelock, flags); 6691 device_data = lpfc_create_device_data(phba, 6692 &vport->fc_portname, 6693 &target_wwpn, 6694 sdev->lun, 6695 phba->cfg_XLanePriority, 6696 true); 6697 if (!device_data) 6698 return -ENOMEM; 6699 spin_lock_irqsave(&phba->devicelock, flags); 6700 list_add_tail(&device_data->listentry, &phba->luns); 6701 } 6702 device_data->rport_data = rport->dd_data; 6703 device_data->available = true; 6704 spin_unlock_irqrestore(&phba->devicelock, flags); 6705 sdev->hostdata = device_data; 6706 } else { 6707 sdev->hostdata = rport->dd_data; 6708 } 6709 sdev_cnt = atomic_inc_return(&phba->sdev_cnt); 6710 6711 /* For SLI4, all IO buffers are pre-allocated */ 6712 if (phba->sli_rev == LPFC_SLI_REV4) 6713 return 0; 6714 6715 /* This code path is now ONLY for SLI3 adapters */ 6716 6717 /* 6718 * Populate the cmds_per_lun count scsi_bufs into this host's globally 6719 * available list of scsi buffers. Don't allocate more than the 6720 * HBA limit conveyed to the midlayer via the host structure. The 6721 * formula accounts for the lun_queue_depth + error handlers + 1 6722 * extra. This list of scsi bufs exists for the lifetime of the driver. 6723 */ 6724 total = phba->total_scsi_bufs; 6725 num_to_alloc = vport->cfg_lun_queue_depth + 2; 6726 6727 /* If allocated buffers are enough do nothing */ 6728 if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total) 6729 return 0; 6730 6731 /* Allow some exchanges to be available always to complete discovery */ 6732 if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) { 6733 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 6734 "0704 At limitation of %d preallocated " 6735 "command buffers\n", total); 6736 return 0; 6737 /* Allow some exchanges to be available always to complete discovery */ 6738 } else if (total + num_to_alloc > 6739 phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) { 6740 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 6741 "0705 Allocation request of %d " 6742 "command buffers will exceed max of %d. " 6743 "Reducing allocation request to %d.\n", 6744 num_to_alloc, phba->cfg_hba_queue_depth, 6745 (phba->cfg_hba_queue_depth - total)); 6746 num_to_alloc = phba->cfg_hba_queue_depth - total; 6747 } 6748 num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc); 6749 if (num_to_alloc != num_allocated) { 6750 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6751 "0708 Allocation request of %d " 6752 "command buffers did not succeed. " 6753 "Allocated %d buffers.\n", 6754 num_to_alloc, num_allocated); 6755 } 6756 if (num_allocated > 0) 6757 phba->total_scsi_bufs += num_allocated; 6758 return 0; 6759 } 6760 6761 /** 6762 * lpfc_slave_configure - scsi_host_template slave_configure entry point 6763 * @sdev: Pointer to scsi_device. 6764 * 6765 * This routine configures following items 6766 * - Tag command queuing support for @sdev if supported. 6767 * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set. 
6768 * 6769 * Return codes: 6770 * 0 - Success 6771 **/ 6772 static int 6773 lpfc_slave_configure(struct scsi_device *sdev) 6774 { 6775 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 6776 struct lpfc_hba *phba = vport->phba; 6777 6778 scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth); 6779 6780 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 6781 lpfc_sli_handle_fast_ring_event(phba, 6782 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); 6783 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 6784 lpfc_poll_rearm_timer(phba); 6785 } 6786 6787 return 0; 6788 } 6789 6790 /** 6791 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure 6792 * @sdev: Pointer to scsi_device. 6793 * 6794 * This routine sets the @sdev hostdata field to NULL. 6795 **/ 6796 static void 6797 lpfc_slave_destroy(struct scsi_device *sdev) 6798 { 6799 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 6800 struct lpfc_hba *phba = vport->phba; 6801 unsigned long flags; 6802 struct lpfc_device_data *device_data = sdev->hostdata; 6803 6804 atomic_dec(&phba->sdev_cnt); 6805 if ((phba->cfg_fof) && (device_data)) { 6806 spin_lock_irqsave(&phba->devicelock, flags); 6807 device_data->available = false; 6808 if (!device_data->oas_enabled) 6809 lpfc_delete_device_data(phba, device_data); 6810 spin_unlock_irqrestore(&phba->devicelock, flags); 6811 } 6812 sdev->hostdata = NULL; 6813 return; 6814 } 6815 6816 /** 6817 * lpfc_create_device_data - creates and initializes device data structure for OAS 6818 * @phba: Pointer to host bus adapter structure. 6819 * @vport_wwpn: Pointer to vport's wwpn information 6820 * @target_wwpn: Pointer to target's wwpn information 6821 * @lun: Lun on target 6822 * @pri: Priority 6823 * @atomic_create: Flag to indicate if memory should be allocated using the 6824 * GFP_ATOMIC flag or not. 6825 * 6826 * This routine creates a device data structure which will contain identifying 6827 * information for the device (host wwpn, target wwpn, lun), state of OAS, 6828 * whether or not the corresponding lun is available to the system, 6829 * and pointer to the rport data. 6830 * 6831 * Return codes: 6832 * NULL - Error 6833 * Pointer to lpfc_device_data - Success 6834 **/ 6835 struct lpfc_device_data* 6836 lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, 6837 struct lpfc_name *target_wwpn, uint64_t lun, 6838 uint32_t pri, bool atomic_create) 6839 { 6840 6841 struct lpfc_device_data *lun_info; 6842 int memory_flags; 6843 6844 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 6845 !(phba->cfg_fof)) 6846 return NULL; 6847 6848 /* Attempt to create the device data to contain lun info */ 6849 6850 if (atomic_create) 6851 memory_flags = GFP_ATOMIC; 6852 else 6853 memory_flags = GFP_KERNEL; 6854 lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags); 6855 if (!lun_info) 6856 return NULL; 6857 INIT_LIST_HEAD(&lun_info->listentry); 6858 lun_info->rport_data = NULL; 6859 memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn, 6860 sizeof(struct lpfc_name)); 6861 memcpy(&lun_info->device_id.target_wwpn, target_wwpn, 6862 sizeof(struct lpfc_name)); 6863 lun_info->device_id.lun = lun; 6864 lun_info->oas_enabled = false; 6865 lun_info->priority = pri; 6866 lun_info->available = false; 6867 return lun_info; 6868 } 6869 6870 /** 6871 * lpfc_delete_device_data - frees a device data structure for OAS 6872 * @phba: Pointer to host bus adapter structure. 6873 * @lun_info: Pointer to device data structure to free.
6874 * 6875 * This routine frees the previously allocated device data structure passed. 6876 * 6877 **/ 6878 void 6879 lpfc_delete_device_data(struct lpfc_hba *phba, 6880 struct lpfc_device_data *lun_info) 6881 { 6882 6883 if (unlikely(!phba) || !lun_info || 6884 !(phba->cfg_fof)) 6885 return; 6886 6887 if (!list_empty(&lun_info->listentry)) 6888 list_del(&lun_info->listentry); 6889 mempool_free(lun_info, phba->device_data_mem_pool); 6890 return; 6891 } 6892 6893 /** 6894 * __lpfc_get_device_data - returns the device data for the specified lun 6895 * @phba: Pointer to host bus adapter structure. 6896 * @list: Point to list to search. 6897 * @vport_wwpn: Pointer to vport's wwpn information 6898 * @target_wwpn: Pointer to target's wwpn information 6899 * @lun: Lun on target 6900 * 6901 * This routine searches the list passed for the specified lun's device data. 6902 * This function does not hold locks, it is the responsibility of the caller 6903 * to ensure the proper lock is held before calling the function. 6904 * 6905 * Return codes: 6906 * NULL - Error 6907 * Pointer to lpfc_device_data - Success 6908 **/ 6909 struct lpfc_device_data* 6910 __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list, 6911 struct lpfc_name *vport_wwpn, 6912 struct lpfc_name *target_wwpn, uint64_t lun) 6913 { 6914 6915 struct lpfc_device_data *lun_info; 6916 6917 if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn || 6918 !phba->cfg_fof) 6919 return NULL; 6920 6921 /* Check to see if the lun is already enabled for OAS. */ 6922 6923 list_for_each_entry(lun_info, list, listentry) { 6924 if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn, 6925 sizeof(struct lpfc_name)) == 0) && 6926 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn, 6927 sizeof(struct lpfc_name)) == 0) && 6928 (lun_info->device_id.lun == lun)) 6929 return lun_info; 6930 } 6931 6932 return NULL; 6933 } 6934 6935 /** 6936 * lpfc_find_next_oas_lun - searches for the next oas lun 6937 * @phba: Pointer to host bus adapter structure. 6938 * @vport_wwpn: Pointer to vport's wwpn information 6939 * @target_wwpn: Pointer to target's wwpn information 6940 * @starting_lun: Pointer to the lun to start searching for 6941 * @found_vport_wwpn: Pointer to the found lun's vport wwpn information 6942 * @found_target_wwpn: Pointer to the found lun's target wwpn information 6943 * @found_lun: Pointer to the found lun. 6944 * @found_lun_status: Pointer to status of the found lun. 6945 * @found_lun_pri: Pointer to priority of the found lun. 6946 * 6947 * This routine searches the luns list for the specified lun 6948 * or the first lun for the vport/target. If the vport wwpn contains 6949 * a zero value then a specific vport is not specified. In this case 6950 * any vport which contains the lun will be considered a match. If the 6951 * target wwpn contains a zero value then a specific target is not specified. 6952 * In this case any target which contains the lun will be considered a 6953 * match. If the lun is found, the lun, vport wwpn, target wwpn and lun status 6954 * are returned. The function will also return the next lun if available. 6955 * If the next lun is not found, starting_lun parameter will be set to 6956 * NO_MORE_OAS_LUN. 
6957 * 6958 * Return codes: 6959 * false - Error 6960 * true - Success 6961 **/ 6962 bool 6963 lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, 6964 struct lpfc_name *target_wwpn, uint64_t *starting_lun, 6965 struct lpfc_name *found_vport_wwpn, 6966 struct lpfc_name *found_target_wwpn, 6967 uint64_t *found_lun, 6968 uint32_t *found_lun_status, 6969 uint32_t *found_lun_pri) 6970 { 6971 6972 unsigned long flags; 6973 struct lpfc_device_data *lun_info; 6974 struct lpfc_device_id *device_id; 6975 uint64_t lun; 6976 bool found = false; 6977 6978 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 6979 !starting_lun || !found_vport_wwpn || 6980 !found_target_wwpn || !found_lun || !found_lun_status || 6981 (*starting_lun == NO_MORE_OAS_LUN) || 6982 !phba->cfg_fof) 6983 return false; 6984 6985 lun = *starting_lun; 6986 *found_lun = NO_MORE_OAS_LUN; 6987 *starting_lun = NO_MORE_OAS_LUN; 6988 6989 /* Search for the lun or the lun closest in value */ 6990 6991 spin_lock_irqsave(&phba->devicelock, flags); 6992 list_for_each_entry(lun_info, &phba->luns, listentry) { 6993 if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) || 6994 (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn, 6995 sizeof(struct lpfc_name)) == 0)) && 6996 ((wwn_to_u64(target_wwpn->u.wwn) == 0) || 6997 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn, 6998 sizeof(struct lpfc_name)) == 0)) && 6999 (lun_info->oas_enabled)) { 7000 device_id = &lun_info->device_id; 7001 if ((!found) && 7002 ((lun == FIND_FIRST_OAS_LUN) || 7003 (device_id->lun == lun))) { 7004 *found_lun = device_id->lun; 7005 memcpy(found_vport_wwpn, 7006 &device_id->vport_wwpn, 7007 sizeof(struct lpfc_name)); 7008 memcpy(found_target_wwpn, 7009 &device_id->target_wwpn, 7010 sizeof(struct lpfc_name)); 7011 if (lun_info->available) 7012 *found_lun_status = 7013 OAS_LUN_STATUS_EXISTS; 7014 else 7015 *found_lun_status = 0; 7016 *found_lun_pri = lun_info->priority; 7017 if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT) 7018 memset(vport_wwpn, 0x0, 7019 sizeof(struct lpfc_name)); 7020 if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET) 7021 memset(target_wwpn, 0x0, 7022 sizeof(struct lpfc_name)); 7023 found = true; 7024 } else if (found) { 7025 *starting_lun = device_id->lun; 7026 memcpy(vport_wwpn, &device_id->vport_wwpn, 7027 sizeof(struct lpfc_name)); 7028 memcpy(target_wwpn, &device_id->target_wwpn, 7029 sizeof(struct lpfc_name)); 7030 break; 7031 } 7032 } 7033 } 7034 spin_unlock_irqrestore(&phba->devicelock, flags); 7035 return found; 7036 } 7037 7038 /** 7039 * lpfc_enable_oas_lun - enables a lun for OAS operations 7040 * @phba: Pointer to host bus adapter structure. 7041 * @vport_wwpn: Pointer to vport's wwpn information 7042 * @target_wwpn: Pointer to target's wwpn information 7043 * @lun: Lun 7044 * @pri: Priority 7045 * 7046 * This routine enables a lun for oas operations. The routine does so by 7047 * doing the following: 7048 * 7049 * 1) Checks to see if the device data for the lun has been created. 7050 * 2) If found, sets the OAS enabled flag if not set and returns. 7051 * 3) Otherwise, creates a device data structure. 7052 * 4) If successfully created, indicates the device data is for an OAS lun, 7053 * indicates the lun is not available and adds it to the list of luns.
7054 * 7055 * Return codes: 7056 * false - Error 7057 * true - Success 7058 **/ 7059 bool 7060 lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, 7061 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri) 7062 { 7063 7064 struct lpfc_device_data *lun_info; 7065 unsigned long flags; 7066 7067 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 7068 !phba->cfg_fof) 7069 return false; 7070 7071 spin_lock_irqsave(&phba->devicelock, flags); 7072 7073 /* Check to see if the device data for the lun has been created */ 7074 lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn, 7075 target_wwpn, lun); 7076 if (lun_info) { 7077 if (!lun_info->oas_enabled) 7078 lun_info->oas_enabled = true; 7079 lun_info->priority = pri; 7080 spin_unlock_irqrestore(&phba->devicelock, flags); 7081 return true; 7082 } 7083 7084 /* Create a lun info structure and add it to the list of luns */ 7085 lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun, 7086 pri, true); 7087 if (lun_info) { 7088 lun_info->oas_enabled = true; 7089 lun_info->priority = pri; 7090 lun_info->available = false; 7091 list_add_tail(&lun_info->listentry, &phba->luns); 7092 spin_unlock_irqrestore(&phba->devicelock, flags); 7093 return true; 7094 } 7095 spin_unlock_irqrestore(&phba->devicelock, flags); 7096 return false; 7097 } 7098 7099 /** 7100 * lpfc_disable_oas_lun - disables a lun for OAS operations 7101 * @phba: Pointer to host bus adapter structure. 7102 * @vport_wwpn: Pointer to vport's wwpn information 7103 * @target_wwpn: Pointer to target's wwpn information 7104 * @lun: Lun 7105 * @pri: Priority 7106 * 7107 * This routine disables a lun for oas operations. The routine does so by 7108 * doing the following: 7109 * 7110 * 1) Checks to see if the device data for the lun is created. 7111 * 2) If present, clears the flag indicating this lun is for OAS. 7112 * 3) If the lun is not available to the system, the device data is 7113 * freed. 7114 * 7115 * Return codes: 7116 * false - Error 7117 * true - Success 7118 **/ 7119 bool 7120 lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, 7121 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri) 7122 { 7123 7124 struct lpfc_device_data *lun_info; 7125 unsigned long flags; 7126 7127 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 7128 !phba->cfg_fof) 7129 return false; 7130 7131 spin_lock_irqsave(&phba->devicelock, flags); 7132 7133 /* Check to see if the lun is available.
*/ 7134 lun_info = __lpfc_get_device_data(phba, 7135 &phba->luns, vport_wwpn, 7136 target_wwpn, lun); 7137 if (lun_info) { 7138 lun_info->oas_enabled = false; 7139 lun_info->priority = pri; 7140 if (!lun_info->available) 7141 lpfc_delete_device_data(phba, lun_info); 7142 spin_unlock_irqrestore(&phba->devicelock, flags); 7143 return true; 7144 } 7145 7146 spin_unlock_irqrestore(&phba->devicelock, flags); 7147 return false; 7148 } 7149 7150 static int 7151 lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) 7152 { 7153 return SCSI_MLQUEUE_HOST_BUSY; 7154 } 7155 7156 static int 7157 lpfc_no_handler(struct scsi_cmnd *cmnd) 7158 { 7159 return FAILED; 7160 } 7161 7162 static int 7163 lpfc_no_slave(struct scsi_device *sdev) 7164 { 7165 return -ENODEV; 7166 } 7167 7168 struct scsi_host_template lpfc_template_nvme = { 7169 .module = THIS_MODULE, 7170 .name = LPFC_DRIVER_NAME, 7171 .proc_name = LPFC_DRIVER_NAME, 7172 .info = lpfc_info, 7173 .queuecommand = lpfc_no_command, 7174 .eh_abort_handler = lpfc_no_handler, 7175 .eh_device_reset_handler = lpfc_no_handler, 7176 .eh_target_reset_handler = lpfc_no_handler, 7177 .eh_bus_reset_handler = lpfc_no_handler, 7178 .eh_host_reset_handler = lpfc_no_handler, 7179 .slave_alloc = lpfc_no_slave, 7180 .slave_configure = lpfc_no_slave, 7181 .scan_finished = lpfc_scan_finished, 7182 .this_id = -1, 7183 .sg_tablesize = 1, 7184 .cmd_per_lun = 1, 7185 .shost_attrs = lpfc_hba_attrs, 7186 .max_sectors = 0xFFFFFFFF, 7187 .vendor_id = LPFC_NL_VENDOR_ID, 7188 .track_queue_depth = 0, 7189 }; 7190 7191 struct scsi_host_template lpfc_template = { 7192 .module = THIS_MODULE, 7193 .name = LPFC_DRIVER_NAME, 7194 .proc_name = LPFC_DRIVER_NAME, 7195 .info = lpfc_info, 7196 .queuecommand = lpfc_queuecommand, 7197 .eh_timed_out = fc_eh_timed_out, 7198 .eh_should_retry_cmd = fc_eh_should_retry_cmd, 7199 .eh_abort_handler = lpfc_abort_handler, 7200 .eh_device_reset_handler = lpfc_device_reset_handler, 7201 .eh_target_reset_handler = lpfc_target_reset_handler, 7202 .eh_bus_reset_handler = lpfc_bus_reset_handler, 7203 .eh_host_reset_handler = lpfc_host_reset_handler, 7204 .slave_alloc = lpfc_slave_alloc, 7205 .slave_configure = lpfc_slave_configure, 7206 .slave_destroy = lpfc_slave_destroy, 7207 .scan_finished = lpfc_scan_finished, 7208 .this_id = -1, 7209 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, 7210 .cmd_per_lun = LPFC_CMD_PER_LUN, 7211 .shost_attrs = lpfc_hba_attrs, 7212 .max_sectors = 0xFFFFFFFF, 7213 .vendor_id = LPFC_NL_VENDOR_ID, 7214 .change_queue_depth = scsi_change_queue_depth, 7215 .track_queue_depth = 1, 7216 }; 7217
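/*
 * Illustrative sketch (editor's addition, kept compiled out): the two
 * scsi_host_template definitions above are what the SCSI midlayer drives
 * once a host is allocated and registered. The outline below is a minimal,
 * hedged example of that flow; lpfc_example_attach() and its pdev argument
 * are hypothetical names used only for illustration, while
 * scsi_host_alloc()/scsi_add_host()/scsi_scan_host()/scsi_host_put() are the
 * standard midlayer calls that make .queuecommand, the eh_* handlers and the
 * slave_* callbacks in lpfc_template live.
 */
#if 0
static int lpfc_example_attach(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	int error;

	/* Allocate a Scsi_Host; the private area would hold the lpfc_vport */
	shost = scsi_host_alloc(&lpfc_template, sizeof(struct lpfc_vport));
	if (!shost)
		return -ENOMEM;

	/* Register with the midlayer; lpfc_queuecommand may now be invoked */
	error = scsi_add_host(shost, &pdev->dev);
	if (error) {
		scsi_host_put(shost);
		return error;
	}

	/* Scan for LUNs, exercising lpfc_slave_alloc/lpfc_slave_configure */
	scsi_scan_host(shost);
	return 0;
}
#endif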