1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 *******************************************************************/ 23 #include <linux/pci.h> 24 #include <linux/slab.h> 25 #include <linux/interrupt.h> 26 #include <linux/export.h> 27 #include <linux/delay.h> 28 #include <asm/unaligned.h> 29 #include <linux/t10-pi.h> 30 #include <linux/crc-t10dif.h> 31 #include <linux/blk-cgroup.h> 32 #include <net/checksum.h> 33 34 #include <scsi/scsi.h> 35 #include <scsi/scsi_device.h> 36 #include <scsi/scsi_eh.h> 37 #include <scsi/scsi_host.h> 38 #include <scsi/scsi_tcq.h> 39 #include <scsi/scsi_transport_fc.h> 40 41 #include "lpfc_version.h" 42 #include "lpfc_hw4.h" 43 #include "lpfc_hw.h" 44 #include "lpfc_sli.h" 45 #include "lpfc_sli4.h" 46 #include "lpfc_nl.h" 47 #include "lpfc_disc.h" 48 #include "lpfc.h" 49 #include "lpfc_scsi.h" 50 #include "lpfc_logmsg.h" 51 #include "lpfc_crtn.h" 52 #include "lpfc_vport.h" 53 54 #define LPFC_RESET_WAIT 2 55 #define LPFC_ABORT_WAIT 2 56 57 static char *dif_op_str[] = { 58 "PROT_NORMAL", 59 "PROT_READ_INSERT", 60 "PROT_WRITE_STRIP", 61 "PROT_READ_STRIP", 62 "PROT_WRITE_INSERT", 63 "PROT_READ_PASS", 64 "PROT_WRITE_PASS", 65 }; 66 67 struct scsi_dif_tuple { 68 __be16 guard_tag; /* Checksum */ 69 __be16 app_tag; /* Opaque storage */ 70 __be32 ref_tag; /* Target LBA or indirect LBA */ 71 }; 72 73 static struct lpfc_rport_data * 74 lpfc_rport_data_from_scsi_device(struct scsi_device *sdev) 75 { 76 struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata; 77 78 if (vport->phba->cfg_fof) 79 return ((struct lpfc_device_data *)sdev->hostdata)->rport_data; 80 else 81 return (struct lpfc_rport_data *)sdev->hostdata; 82 } 83 84 static void 85 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb); 86 static void 87 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb); 88 static int 89 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc); 90 static void 91 lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash, 92 struct lpfc_vmid *vmp); 93 static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd 94 *cmd, struct lpfc_vmid *vmp, 95 union lpfc_vmid_io_tag *tag); 96 static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport, 97 struct lpfc_vmid *vmid); 98 99 /** 100 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge. 
101 * @phba: Pointer to HBA object. 102 * @lpfc_cmd: lpfc scsi command object pointer. 103 * 104 * This function is called from the lpfc_prep_task_mgmt_cmd function to 105 * set the last bit in the response sge entry. 106 **/ 107 static void 108 lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba, 109 struct lpfc_io_buf *lpfc_cmd) 110 { 111 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; 112 if (sgl) { 113 sgl += 1; 114 sgl->word2 = le32_to_cpu(sgl->word2); 115 bf_set(lpfc_sli4_sge_last, sgl, 1); 116 sgl->word2 = cpu_to_le32(sgl->word2); 117 } 118 } 119 120 #define LPFC_INVALID_REFTAG ((u32)-1) 121 122 /** 123 * lpfc_update_stats - Update statistical data for the command completion 124 * @vport: The virtual port on which this call is executing. 125 * @lpfc_cmd: lpfc scsi command object pointer. 126 * 127 * This function is called when there is a command completion and this 128 * function updates the statistical data for the command completion. 129 **/ 130 static void 131 lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd) 132 { 133 struct lpfc_hba *phba = vport->phba; 134 struct lpfc_rport_data *rdata; 135 struct lpfc_nodelist *pnode; 136 struct scsi_cmnd *cmd = lpfc_cmd->pCmd; 137 unsigned long flags; 138 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 139 unsigned long latency; 140 int i; 141 142 if (!vport->stat_data_enabled || 143 vport->stat_data_blocked || 144 (cmd->result)) 145 return; 146 147 latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time); 148 rdata = lpfc_cmd->rdata; 149 pnode = rdata->pnode; 150 151 spin_lock_irqsave(shost->host_lock, flags); 152 if (!pnode || 153 !pnode->lat_data || 154 (phba->bucket_type == LPFC_NO_BUCKET)) { 155 spin_unlock_irqrestore(shost->host_lock, flags); 156 return; 157 } 158 159 if (phba->bucket_type == LPFC_LINEAR_BUCKET) { 160 i = (latency + phba->bucket_step - 1 - phba->bucket_base)/ 161 phba->bucket_step; 162 /* check array subscript bounds */ 163 if (i < 0) 164 i = 0; 165 else if (i >= LPFC_MAX_BUCKET_COUNT) 166 i = LPFC_MAX_BUCKET_COUNT - 1; 167 } else { 168 for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++) 169 if (latency <= (phba->bucket_base + 170 ((1<<i)*phba->bucket_step))) 171 break; 172 } 173 174 pnode->lat_data[i].cmd_count++; 175 spin_unlock_irqrestore(shost->host_lock, flags); 176 } 177 178 /** 179 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread 180 * @phba: The Hba for which this call is being executed. 181 * 182 * This routine is called when there is resource error in driver or firmware. 183 * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine 184 * posts at most 1 event each second. This routine wakes up worker thread of 185 * @phba to process WORKER_RAM_DOWN_EVENT event. 186 * 187 * This routine should be called with no lock held. 
188 **/ 189 void 190 lpfc_rampdown_queue_depth(struct lpfc_hba *phba) 191 { 192 unsigned long flags; 193 uint32_t evt_posted; 194 unsigned long expires; 195 196 spin_lock_irqsave(&phba->hbalock, flags); 197 atomic_inc(&phba->num_rsrc_err); 198 phba->last_rsrc_error_time = jiffies; 199 200 expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL; 201 if (time_after(expires, jiffies)) { 202 spin_unlock_irqrestore(&phba->hbalock, flags); 203 return; 204 } 205 206 phba->last_ramp_down_time = jiffies; 207 208 spin_unlock_irqrestore(&phba->hbalock, flags); 209 210 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 211 evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE; 212 if (!evt_posted) 213 phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE; 214 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); 215 216 if (!evt_posted) 217 lpfc_worker_wake_up(phba); 218 return; 219 } 220 221 /** 222 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler 223 * @phba: The Hba for which this call is being executed. 224 * 225 * This routine is called to process WORKER_RAMP_DOWN_QUEUE event for worker 226 * thread.This routine reduces queue depth for all scsi device on each vport 227 * associated with @phba. 228 **/ 229 void 230 lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) 231 { 232 struct lpfc_vport **vports; 233 struct Scsi_Host *shost; 234 struct scsi_device *sdev; 235 unsigned long new_queue_depth; 236 unsigned long num_rsrc_err, num_cmd_success; 237 int i; 238 239 num_rsrc_err = atomic_read(&phba->num_rsrc_err); 240 num_cmd_success = atomic_read(&phba->num_cmd_success); 241 242 /* 243 * The error and success command counters are global per 244 * driver instance. If another handler has already 245 * operated on this error event, just exit. 246 */ 247 if (num_rsrc_err == 0) 248 return; 249 250 vports = lpfc_create_vport_work_array(phba); 251 if (vports != NULL) 252 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 253 shost = lpfc_shost_from_vport(vports[i]); 254 shost_for_each_device(sdev, shost) { 255 new_queue_depth = 256 sdev->queue_depth * num_rsrc_err / 257 (num_rsrc_err + num_cmd_success); 258 if (!new_queue_depth) 259 new_queue_depth = sdev->queue_depth - 1; 260 else 261 new_queue_depth = sdev->queue_depth - 262 new_queue_depth; 263 scsi_change_queue_depth(sdev, new_queue_depth); 264 } 265 } 266 lpfc_destroy_vport_work_array(phba, vports); 267 atomic_set(&phba->num_rsrc_err, 0); 268 atomic_set(&phba->num_cmd_success, 0); 269 } 270 271 /** 272 * lpfc_scsi_dev_block - set all scsi hosts to block state 273 * @phba: Pointer to HBA context object. 274 * 275 * This function walks vport list and set each SCSI host to block state 276 * by invoking fc_remote_port_delete() routine. This function is invoked 277 * with EEH when device's PCI slot has been permanently disabled. 
278 **/ 279 void 280 lpfc_scsi_dev_block(struct lpfc_hba *phba) 281 { 282 struct lpfc_vport **vports; 283 struct Scsi_Host *shost; 284 struct scsi_device *sdev; 285 struct fc_rport *rport; 286 int i; 287 288 vports = lpfc_create_vport_work_array(phba); 289 if (vports != NULL) 290 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 291 shost = lpfc_shost_from_vport(vports[i]); 292 shost_for_each_device(sdev, shost) { 293 rport = starget_to_rport(scsi_target(sdev)); 294 fc_remote_port_delete(rport); 295 } 296 } 297 lpfc_destroy_vport_work_array(phba, vports); 298 } 299 300 /** 301 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec 302 * @vport: The virtual port for which this call being executed. 303 * @num_to_alloc: The requested number of buffers to allocate. 304 * 305 * This routine allocates a scsi buffer for device with SLI-3 interface spec, 306 * the scsi buffer contains all the necessary information needed to initiate 307 * a SCSI I/O. The non-DMAable buffer region contains information to build 308 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP, 309 * and the initial BPL. In addition to allocating memory, the FCP CMND and 310 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB. 311 * 312 * Return codes: 313 * int - number of scsi buffers that were allocated. 314 * 0 = failure, less than num_to_alloc is a partial failure. 315 **/ 316 static int 317 lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) 318 { 319 struct lpfc_hba *phba = vport->phba; 320 struct lpfc_io_buf *psb; 321 struct ulp_bde64 *bpl; 322 IOCB_t *iocb; 323 dma_addr_t pdma_phys_fcp_cmd; 324 dma_addr_t pdma_phys_fcp_rsp; 325 dma_addr_t pdma_phys_sgl; 326 uint16_t iotag; 327 int bcnt, bpl_size; 328 329 bpl_size = phba->cfg_sg_dma_buf_size - 330 (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); 331 332 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 333 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n", 334 num_to_alloc, phba->cfg_sg_dma_buf_size, 335 (int)sizeof(struct fcp_cmnd), 336 (int)sizeof(struct fcp_rsp), bpl_size); 337 338 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { 339 psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL); 340 if (!psb) 341 break; 342 343 /* 344 * Get memory from the pci pool to map the virt space to pci 345 * bus space for an I/O. The DMA buffer includes space for the 346 * struct fcp_cmnd, struct fcp_rsp and the number of bde's 347 * necessary to support the sg_tablesize. 348 */ 349 psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool, 350 GFP_KERNEL, &psb->dma_handle); 351 if (!psb->data) { 352 kfree(psb); 353 break; 354 } 355 356 357 /* Allocate iotag for psb->cur_iocbq. */ 358 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); 359 if (iotag == 0) { 360 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 361 psb->data, psb->dma_handle); 362 kfree(psb); 363 break; 364 } 365 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; 366 367 psb->fcp_cmnd = psb->data; 368 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd); 369 psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) + 370 sizeof(struct fcp_rsp); 371 372 /* Initialize local short-hand pointers. */ 373 bpl = (struct ulp_bde64 *)psb->dma_sgl; 374 pdma_phys_fcp_cmd = psb->dma_handle; 375 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd); 376 pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) + 377 sizeof(struct fcp_rsp); 378 379 /* 380 * The first two bdes are the FCP_CMD and FCP_RSP. The balance 381 * are sg list bdes. 
Initialize the first two and leave the 382 * rest for queuecommand. 383 */ 384 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd)); 385 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd)); 386 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd); 387 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64; 388 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w); 389 390 /* Setup the physical region for the FCP RSP */ 391 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp)); 392 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp)); 393 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp); 394 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64; 395 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w); 396 397 /* 398 * Since the IOCB for the FCP I/O is built into this 399 * lpfc_scsi_buf, initialize it with all known data now. 400 */ 401 iocb = &psb->cur_iocbq.iocb; 402 iocb->un.fcpi64.bdl.ulpIoTag32 = 0; 403 if ((phba->sli_rev == 3) && 404 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { 405 /* fill in immediate fcp command BDE */ 406 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED; 407 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); 408 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t, 409 unsli3.fcp_ext.icd); 410 iocb->un.fcpi64.bdl.addrHigh = 0; 411 iocb->ulpBdeCount = 0; 412 iocb->ulpLe = 0; 413 /* fill in response BDE */ 414 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = 415 BUFF_TYPE_BDE_64; 416 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize = 417 sizeof(struct fcp_rsp); 418 iocb->unsli3.fcp_ext.rbde.addrLow = 419 putPaddrLow(pdma_phys_fcp_rsp); 420 iocb->unsli3.fcp_ext.rbde.addrHigh = 421 putPaddrHigh(pdma_phys_fcp_rsp); 422 } else { 423 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64; 424 iocb->un.fcpi64.bdl.bdeSize = 425 (2 * sizeof(struct ulp_bde64)); 426 iocb->un.fcpi64.bdl.addrLow = 427 putPaddrLow(pdma_phys_sgl); 428 iocb->un.fcpi64.bdl.addrHigh = 429 putPaddrHigh(pdma_phys_sgl); 430 iocb->ulpBdeCount = 1; 431 iocb->ulpLe = 1; 432 } 433 iocb->ulpClass = CLASS3; 434 psb->status = IOSTAT_SUCCESS; 435 /* Put it back into the SCSI buffer list */ 436 psb->cur_iocbq.context1 = psb; 437 spin_lock_init(&psb->buf_lock); 438 lpfc_release_scsi_buf_s3(phba, psb); 439 440 } 441 442 return bcnt; 443 } 444 445 /** 446 * lpfc_sli4_vport_delete_fcp_xri_aborted -Remove all ndlp references for vport 447 * @vport: pointer to lpfc vport data structure. 448 * 449 * This routine is invoked by the vport cleanup for deletions and the cleanup 450 * for an ndlp on removal. 451 **/ 452 void 453 lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport) 454 { 455 struct lpfc_hba *phba = vport->phba; 456 struct lpfc_io_buf *psb, *next_psb; 457 struct lpfc_sli4_hdw_queue *qp; 458 unsigned long iflag = 0; 459 int idx; 460 461 if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 462 return; 463 464 spin_lock_irqsave(&phba->hbalock, iflag); 465 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 466 qp = &phba->sli4_hba.hdwq[idx]; 467 468 spin_lock(&qp->abts_io_buf_list_lock); 469 list_for_each_entry_safe(psb, next_psb, 470 &qp->lpfc_abts_io_buf_list, list) { 471 if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) 472 continue; 473 474 if (psb->rdata && psb->rdata->pnode && 475 psb->rdata->pnode->vport == vport) 476 psb->rdata = NULL; 477 } 478 spin_unlock(&qp->abts_io_buf_list_lock); 479 } 480 spin_unlock_irqrestore(&phba->hbalock, iflag); 481 } 482 483 /** 484 * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort 485 * @phba: pointer to lpfc hba data structure. 486 * @axri: pointer to the fcp xri abort wcqe structure. 
487 * @idx: index into hdwq 488 * 489 * This routine is invoked by the worker thread to process a SLI4 fast-path 490 * FCP or NVME aborted xri. 491 **/ 492 void 493 lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba, 494 struct sli4_wcqe_xri_aborted *axri, int idx) 495 { 496 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 497 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); 498 struct lpfc_io_buf *psb, *next_psb; 499 struct lpfc_sli4_hdw_queue *qp; 500 unsigned long iflag = 0; 501 struct lpfc_iocbq *iocbq; 502 int i; 503 struct lpfc_nodelist *ndlp; 504 int rrq_empty = 0; 505 struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring; 506 struct scsi_cmnd *cmd; 507 508 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 509 return; 510 511 qp = &phba->sli4_hba.hdwq[idx]; 512 spin_lock_irqsave(&phba->hbalock, iflag); 513 spin_lock(&qp->abts_io_buf_list_lock); 514 list_for_each_entry_safe(psb, next_psb, 515 &qp->lpfc_abts_io_buf_list, list) { 516 if (psb->cur_iocbq.sli4_xritag == xri) { 517 list_del_init(&psb->list); 518 psb->flags &= ~LPFC_SBUF_XBUSY; 519 psb->status = IOSTAT_SUCCESS; 520 if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) { 521 qp->abts_nvme_io_bufs--; 522 spin_unlock(&qp->abts_io_buf_list_lock); 523 spin_unlock_irqrestore(&phba->hbalock, iflag); 524 lpfc_sli4_nvme_xri_aborted(phba, axri, psb); 525 return; 526 } 527 qp->abts_scsi_io_bufs--; 528 spin_unlock(&qp->abts_io_buf_list_lock); 529 530 if (psb->rdata && psb->rdata->pnode) 531 ndlp = psb->rdata->pnode; 532 else 533 ndlp = NULL; 534 535 rrq_empty = list_empty(&phba->active_rrq_list); 536 spin_unlock_irqrestore(&phba->hbalock, iflag); 537 if (ndlp) { 538 lpfc_set_rrq_active(phba, ndlp, 539 psb->cur_iocbq.sli4_lxritag, rxid, 1); 540 lpfc_sli4_abts_err_handler(phba, ndlp, axri); 541 } 542 543 if (phba->cfg_fcp_wait_abts_rsp) { 544 spin_lock_irqsave(&psb->buf_lock, iflag); 545 cmd = psb->pCmd; 546 psb->pCmd = NULL; 547 spin_unlock_irqrestore(&psb->buf_lock, iflag); 548 549 /* The sdev is not guaranteed to be valid post 550 * scsi_done upcall. 551 */ 552 if (cmd) 553 cmd->scsi_done(cmd); 554 555 /* 556 * We expect there is an abort thread waiting 557 * for command completion wake up the thread. 558 */ 559 spin_lock_irqsave(&psb->buf_lock, iflag); 560 psb->cur_iocbq.iocb_flag &= 561 ~LPFC_DRIVER_ABORTED; 562 if (psb->waitq) 563 wake_up(psb->waitq); 564 spin_unlock_irqrestore(&psb->buf_lock, iflag); 565 } 566 567 lpfc_release_scsi_buf_s4(phba, psb); 568 if (rrq_empty) 569 lpfc_worker_wake_up(phba); 570 return; 571 } 572 } 573 spin_unlock(&qp->abts_io_buf_list_lock); 574 for (i = 1; i <= phba->sli.last_iotag; i++) { 575 iocbq = phba->sli.iocbq_lookup[i]; 576 577 if (!(iocbq->iocb_flag & LPFC_IO_FCP) || 578 (iocbq->iocb_flag & LPFC_IO_LIBDFC)) 579 continue; 580 if (iocbq->sli4_xritag != xri) 581 continue; 582 psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); 583 psb->flags &= ~LPFC_SBUF_XBUSY; 584 spin_unlock_irqrestore(&phba->hbalock, iflag); 585 if (!list_empty(&pring->txq)) 586 lpfc_worker_wake_up(phba); 587 return; 588 589 } 590 spin_unlock_irqrestore(&phba->hbalock, iflag); 591 } 592 593 /** 594 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA 595 * @phba: The HBA for which this call is being executed. 596 * @ndlp: pointer to a node-list data structure. 597 * @cmnd: Pointer to scsi_cmnd data structure. 598 * 599 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list 600 * and returns to caller. 
601 * 602 * Return codes: 603 * NULL - Error 604 * Pointer to lpfc_scsi_buf - Success 605 **/ 606 static struct lpfc_io_buf * 607 lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 608 struct scsi_cmnd *cmnd) 609 { 610 struct lpfc_io_buf *lpfc_cmd = NULL; 611 struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get; 612 unsigned long iflag = 0; 613 614 spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag); 615 list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf, 616 list); 617 if (!lpfc_cmd) { 618 spin_lock(&phba->scsi_buf_list_put_lock); 619 list_splice(&phba->lpfc_scsi_buf_list_put, 620 &phba->lpfc_scsi_buf_list_get); 621 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 622 list_remove_head(scsi_buf_list_get, lpfc_cmd, 623 struct lpfc_io_buf, list); 624 spin_unlock(&phba->scsi_buf_list_put_lock); 625 } 626 spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag); 627 628 if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) { 629 atomic_inc(&ndlp->cmd_pending); 630 lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH; 631 } 632 return lpfc_cmd; 633 } 634 /** 635 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA 636 * @phba: The HBA for which this call is being executed. 637 * @ndlp: pointer to a node-list data structure. 638 * @cmnd: Pointer to scsi_cmnd data structure. 639 * 640 * This routine removes a scsi buffer from head of @hdwq io_buf_list 641 * and returns to caller. 642 * 643 * Return codes: 644 * NULL - Error 645 * Pointer to lpfc_scsi_buf - Success 646 **/ 647 static struct lpfc_io_buf * 648 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 649 struct scsi_cmnd *cmnd) 650 { 651 struct lpfc_io_buf *lpfc_cmd; 652 struct lpfc_sli4_hdw_queue *qp; 653 struct sli4_sge *sgl; 654 dma_addr_t pdma_phys_fcp_rsp; 655 dma_addr_t pdma_phys_fcp_cmd; 656 uint32_t cpu, idx; 657 int tag; 658 struct fcp_cmd_rsp_buf *tmp = NULL; 659 660 cpu = raw_smp_processor_id(); 661 if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) { 662 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd)); 663 idx = blk_mq_unique_tag_to_hwq(tag); 664 } else { 665 idx = phba->sli4_hba.cpu_map[cpu].hdwq; 666 } 667 668 lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx, 669 !phba->cfg_xri_rebalancing); 670 if (!lpfc_cmd) { 671 qp = &phba->sli4_hba.hdwq[idx]; 672 qp->empty_io_bufs++; 673 return NULL; 674 } 675 676 /* Setup key fields in buffer that may have been changed 677 * if other protocols used this buffer. 678 */ 679 lpfc_cmd->cur_iocbq.iocb_flag = LPFC_IO_FCP; 680 lpfc_cmd->prot_seg_cnt = 0; 681 lpfc_cmd->seg_cnt = 0; 682 lpfc_cmd->timeout = 0; 683 lpfc_cmd->flags = 0; 684 lpfc_cmd->start_time = jiffies; 685 lpfc_cmd->waitq = NULL; 686 lpfc_cmd->cpu = cpu; 687 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 688 lpfc_cmd->prot_data_type = 0; 689 #endif 690 tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd); 691 if (!tmp) { 692 lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq); 693 return NULL; 694 } 695 696 lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd; 697 lpfc_cmd->fcp_rsp = tmp->fcp_rsp; 698 699 /* 700 * The first two SGEs are the FCP_CMD and FCP_RSP. 701 * The balance are sg list bdes. Initialize the 702 * first two and leave the rest for queuecommand. 
703 */ 704 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; 705 pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle; 706 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd)); 707 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd)); 708 sgl->word2 = le32_to_cpu(sgl->word2); 709 bf_set(lpfc_sli4_sge_last, sgl, 0); 710 sgl->word2 = cpu_to_le32(sgl->word2); 711 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd)); 712 sgl++; 713 714 /* Setup the physical region for the FCP RSP */ 715 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd); 716 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp)); 717 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp)); 718 sgl->word2 = le32_to_cpu(sgl->word2); 719 bf_set(lpfc_sli4_sge_last, sgl, 1); 720 sgl->word2 = cpu_to_le32(sgl->word2); 721 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp)); 722 723 if (lpfc_ndlp_check_qdepth(phba, ndlp)) { 724 atomic_inc(&ndlp->cmd_pending); 725 lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH; 726 } 727 return lpfc_cmd; 728 } 729 /** 730 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA 731 * @phba: The HBA for which this call is being executed. 732 * @ndlp: pointer to a node-list data structure. 733 * @cmnd: Pointer to scsi_cmnd data structure. 734 * 735 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list 736 * and returns to caller. 737 * 738 * Return codes: 739 * NULL - Error 740 * Pointer to lpfc_scsi_buf - Success 741 **/ 742 static struct lpfc_io_buf* 743 lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 744 struct scsi_cmnd *cmnd) 745 { 746 return phba->lpfc_get_scsi_buf(phba, ndlp, cmnd); 747 } 748 749 /** 750 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list 751 * @phba: The Hba for which this call is being executed. 752 * @psb: The scsi buffer which is being released. 753 * 754 * This routine releases @psb scsi buffer by adding it to tail of @phba 755 * lpfc_scsi_buf_list list. 756 **/ 757 static void 758 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb) 759 { 760 unsigned long iflag = 0; 761 762 psb->seg_cnt = 0; 763 psb->prot_seg_cnt = 0; 764 765 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); 766 psb->pCmd = NULL; 767 psb->cur_iocbq.iocb_flag = LPFC_IO_FCP; 768 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put); 769 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); 770 } 771 772 /** 773 * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list. 774 * @phba: The Hba for which this call is being executed. 775 * @psb: The scsi buffer which is being released. 776 * 777 * This routine releases @psb scsi buffer by adding it to tail of @hdwq 778 * io_buf_list list. For SLI4 XRI's are tied to the scsi buffer 779 * and cannot be reused for at least RA_TOV amount of time if it was 780 * aborted. 
781 **/ 782 static void 783 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb) 784 { 785 struct lpfc_sli4_hdw_queue *qp; 786 unsigned long iflag = 0; 787 788 psb->seg_cnt = 0; 789 psb->prot_seg_cnt = 0; 790 791 qp = psb->hdwq; 792 if (psb->flags & LPFC_SBUF_XBUSY) { 793 spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag); 794 if (!phba->cfg_fcp_wait_abts_rsp) 795 psb->pCmd = NULL; 796 list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list); 797 qp->abts_scsi_io_bufs++; 798 spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag); 799 } else { 800 lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp); 801 } 802 } 803 804 /** 805 * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list. 806 * @phba: The Hba for which this call is being executed. 807 * @psb: The scsi buffer which is being released. 808 * 809 * This routine releases @psb scsi buffer by adding it to tail of @phba 810 * lpfc_scsi_buf_list list. 811 **/ 812 static void 813 lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb) 814 { 815 if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp) 816 atomic_dec(&psb->ndlp->cmd_pending); 817 818 psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH; 819 phba->lpfc_release_scsi_buf(phba, psb); 820 } 821 822 /** 823 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB 824 * @data: A pointer to the immediate command data portion of the IOCB. 825 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer. 826 * 827 * The routine copies the entire FCP command from @fcp_cmnd to @data while 828 * byte swapping the data to big endian format for transmission on the wire. 829 **/ 830 static void 831 lpfc_fcpcmd_to_iocb(u8 *data, struct fcp_cmnd *fcp_cmnd) 832 { 833 int i, j; 834 835 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd); 836 i += sizeof(uint32_t), j++) { 837 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]); 838 } 839 } 840 841 /** 842 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec 843 * @phba: The Hba for which this call is being executed. 844 * @lpfc_cmd: The scsi buffer which is going to be mapped. 845 * 846 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd 847 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans 848 * through sg elements and format the bde. This routine also initializes all 849 * IOCB fields which are dependent on scsi command request buffer. 850 * 851 * Return codes: 852 * 1 - Error 853 * 0 - Success 854 **/ 855 static int 856 lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) 857 { 858 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 859 struct scatterlist *sgel = NULL; 860 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 861 struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl; 862 struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq; 863 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 864 struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde; 865 dma_addr_t physaddr; 866 uint32_t num_bde = 0; 867 int nseg, datadir = scsi_cmnd->sc_data_direction; 868 869 /* 870 * There are three possibilities here - use scatter-gather segment, use 871 * the single mapping, or neither. Start the lpfc command prep by 872 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first 873 * data bde entry. 
874 */ 875 bpl += 2; 876 if (scsi_sg_count(scsi_cmnd)) { 877 /* 878 * The driver stores the segment count returned from pci_map_sg 879 * because this a count of dma-mappings used to map the use_sg 880 * pages. They are not guaranteed to be the same for those 881 * architectures that implement an IOMMU. 882 */ 883 884 nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd), 885 scsi_sg_count(scsi_cmnd), datadir); 886 if (unlikely(!nseg)) 887 return 1; 888 889 lpfc_cmd->seg_cnt = nseg; 890 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 891 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 892 "9064 BLKGRD: %s: Too many sg segments" 893 " from dma_map_sg. Config %d, seg_cnt" 894 " %d\n", __func__, phba->cfg_sg_seg_cnt, 895 lpfc_cmd->seg_cnt); 896 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); 897 lpfc_cmd->seg_cnt = 0; 898 scsi_dma_unmap(scsi_cmnd); 899 return 2; 900 } 901 902 /* 903 * The driver established a maximum scatter-gather segment count 904 * during probe that limits the number of sg elements in any 905 * single scsi command. Just run through the seg_cnt and format 906 * the bde's. 907 * When using SLI-3 the driver will try to fit all the BDEs into 908 * the IOCB. If it can't then the BDEs get added to a BPL as it 909 * does for SLI-2 mode. 910 */ 911 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) { 912 physaddr = sg_dma_address(sgel); 913 if (phba->sli_rev == 3 && 914 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 915 !(iocbq->iocb_flag & DSS_SECURITY_OP) && 916 nseg <= LPFC_EXT_DATA_BDE_COUNT) { 917 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 918 data_bde->tus.f.bdeSize = sg_dma_len(sgel); 919 data_bde->addrLow = putPaddrLow(physaddr); 920 data_bde->addrHigh = putPaddrHigh(physaddr); 921 data_bde++; 922 } else { 923 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 924 bpl->tus.f.bdeSize = sg_dma_len(sgel); 925 bpl->tus.w = le32_to_cpu(bpl->tus.w); 926 bpl->addrLow = 927 le32_to_cpu(putPaddrLow(physaddr)); 928 bpl->addrHigh = 929 le32_to_cpu(putPaddrHigh(physaddr)); 930 bpl++; 931 } 932 } 933 } 934 935 /* 936 * Finish initializing those IOCB fields that are dependent on the 937 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is 938 * explicitly reinitialized and for SLI-3 the extended bde count is 939 * explicitly reinitialized since all iocb memory resources are reused. 940 */ 941 if (phba->sli_rev == 3 && 942 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 943 !(iocbq->iocb_flag & DSS_SECURITY_OP)) { 944 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) { 945 /* 946 * The extended IOCB format can only fit 3 BDE or a BPL. 947 * This I/O has more than 3 BDE so the 1st data bde will 948 * be a BPL that is filled in here. 
949 */ 950 physaddr = lpfc_cmd->dma_handle; 951 data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64; 952 data_bde->tus.f.bdeSize = (num_bde * 953 sizeof(struct ulp_bde64)); 954 physaddr += (sizeof(struct fcp_cmnd) + 955 sizeof(struct fcp_rsp) + 956 (2 * sizeof(struct ulp_bde64))); 957 data_bde->addrHigh = putPaddrHigh(physaddr); 958 data_bde->addrLow = putPaddrLow(physaddr); 959 /* ebde count includes the response bde and data bpl */ 960 iocb_cmd->unsli3.fcp_ext.ebde_count = 2; 961 } else { 962 /* ebde count includes the response bde and data bdes */ 963 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1); 964 } 965 } else { 966 iocb_cmd->un.fcpi64.bdl.bdeSize = 967 ((num_bde + 2) * sizeof(struct ulp_bde64)); 968 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1); 969 } 970 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); 971 972 /* 973 * Due to difference in data length between DIF/non-DIF paths, 974 * we need to set word 4 of IOCB here 975 */ 976 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd); 977 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd); 978 return 0; 979 } 980 981 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 982 983 /* Return BG_ERR_INIT if error injection is detected by Initiator */ 984 #define BG_ERR_INIT 0x1 985 /* Return BG_ERR_TGT if error injection is detected by Target */ 986 #define BG_ERR_TGT 0x2 987 /* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */ 988 #define BG_ERR_SWAP 0x10 989 /* 990 * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for 991 * error injection 992 */ 993 #define BG_ERR_CHECK 0x20 994 995 /** 996 * lpfc_bg_err_inject - Determine if we should inject an error 997 * @phba: The Hba for which this call is being executed. 998 * @sc: The SCSI command to examine 999 * @reftag: (out) BlockGuard reference tag for transmitted data 1000 * @apptag: (out) BlockGuard application tag for transmitted data 1001 * @new_guard: (in) Value to replace CRC with if needed 1002 * 1003 * Returns BG_ERR_* bit mask or 0 if request ignored 1004 **/ 1005 static int 1006 lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1007 uint32_t *reftag, uint16_t *apptag, uint32_t new_guard) 1008 { 1009 struct scatterlist *sgpe; /* s/g prot entry */ 1010 struct lpfc_io_buf *lpfc_cmd = NULL; 1011 struct scsi_dif_tuple *src = NULL; 1012 struct lpfc_nodelist *ndlp; 1013 struct lpfc_rport_data *rdata; 1014 uint32_t op = scsi_get_prot_op(sc); 1015 uint32_t blksize; 1016 uint32_t numblks; 1017 u32 lba; 1018 int rc = 0; 1019 int blockoff = 0; 1020 1021 if (op == SCSI_PROT_NORMAL) 1022 return 0; 1023 1024 sgpe = scsi_prot_sglist(sc); 1025 lba = scsi_prot_ref_tag(sc); 1026 if (lba == LPFC_INVALID_REFTAG) 1027 return 0; 1028 1029 /* First check if we need to match the LBA */ 1030 if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) { 1031 blksize = scsi_prot_interval(sc); 1032 numblks = (scsi_bufflen(sc) + blksize - 1) / blksize; 1033 1034 /* Make sure we have the right LBA if one is specified */ 1035 if (phba->lpfc_injerr_lba < (u64)lba || 1036 (phba->lpfc_injerr_lba >= (u64)(lba + numblks))) 1037 return 0; 1038 if (sgpe) { 1039 blockoff = phba->lpfc_injerr_lba - (u64)lba; 1040 numblks = sg_dma_len(sgpe) / 1041 sizeof(struct scsi_dif_tuple); 1042 if (numblks < blockoff) 1043 blockoff = numblks; 1044 } 1045 } 1046 1047 /* Next check if we need to match the remote NPortID or WWPN */ 1048 rdata = lpfc_rport_data_from_scsi_device(sc->device); 1049 if (rdata && rdata->pnode) { 1050 ndlp = rdata->pnode; 1051 1052 /* Make sure we have the 
right NPortID if one is specified */ 1053 if (phba->lpfc_injerr_nportid && 1054 (phba->lpfc_injerr_nportid != ndlp->nlp_DID)) 1055 return 0; 1056 1057 /* 1058 * Make sure we have the right WWPN if one is specified. 1059 * wwn[0] should be a non-zero NAA in a good WWPN. 1060 */ 1061 if (phba->lpfc_injerr_wwpn.u.wwn[0] && 1062 (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn, 1063 sizeof(struct lpfc_name)) != 0)) 1064 return 0; 1065 } 1066 1067 /* Setup a ptr to the protection data if the SCSI host provides it */ 1068 if (sgpe) { 1069 src = (struct scsi_dif_tuple *)sg_virt(sgpe); 1070 src += blockoff; 1071 lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble; 1072 } 1073 1074 /* Should we change the Reference Tag */ 1075 if (reftag) { 1076 if (phba->lpfc_injerr_wref_cnt) { 1077 switch (op) { 1078 case SCSI_PROT_WRITE_PASS: 1079 if (src) { 1080 /* 1081 * For WRITE_PASS, force the error 1082 * to be sent on the wire. It should 1083 * be detected by the Target. 1084 * If blockoff != 0 error will be 1085 * inserted in middle of the IO. 1086 */ 1087 1088 lpfc_printf_log(phba, KERN_ERR, 1089 LOG_TRACE_EVENT, 1090 "9076 BLKGRD: Injecting reftag error: " 1091 "write lba x%lx + x%x oldrefTag x%x\n", 1092 (unsigned long)lba, blockoff, 1093 be32_to_cpu(src->ref_tag)); 1094 1095 /* 1096 * Save the old ref_tag so we can 1097 * restore it on completion. 1098 */ 1099 if (lpfc_cmd) { 1100 lpfc_cmd->prot_data_type = 1101 LPFC_INJERR_REFTAG; 1102 lpfc_cmd->prot_data_segment = 1103 src; 1104 lpfc_cmd->prot_data = 1105 src->ref_tag; 1106 } 1107 src->ref_tag = cpu_to_be32(0xDEADBEEF); 1108 phba->lpfc_injerr_wref_cnt--; 1109 if (phba->lpfc_injerr_wref_cnt == 0) { 1110 phba->lpfc_injerr_nportid = 0; 1111 phba->lpfc_injerr_lba = 1112 LPFC_INJERR_LBA_OFF; 1113 memset(&phba->lpfc_injerr_wwpn, 1114 0, sizeof(struct lpfc_name)); 1115 } 1116 rc = BG_ERR_TGT | BG_ERR_CHECK; 1117 1118 break; 1119 } 1120 fallthrough; 1121 case SCSI_PROT_WRITE_INSERT: 1122 /* 1123 * For WRITE_INSERT, force the error 1124 * to be sent on the wire. It should be 1125 * detected by the Target. 1126 */ 1127 /* DEADBEEF will be the reftag on the wire */ 1128 *reftag = 0xDEADBEEF; 1129 phba->lpfc_injerr_wref_cnt--; 1130 if (phba->lpfc_injerr_wref_cnt == 0) { 1131 phba->lpfc_injerr_nportid = 0; 1132 phba->lpfc_injerr_lba = 1133 LPFC_INJERR_LBA_OFF; 1134 memset(&phba->lpfc_injerr_wwpn, 1135 0, sizeof(struct lpfc_name)); 1136 } 1137 rc = BG_ERR_TGT | BG_ERR_CHECK; 1138 1139 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1140 "9078 BLKGRD: Injecting reftag error: " 1141 "write lba x%lx\n", (unsigned long)lba); 1142 break; 1143 case SCSI_PROT_WRITE_STRIP: 1144 /* 1145 * For WRITE_STRIP and WRITE_PASS, 1146 * force the error on data 1147 * being copied from SLI-Host to SLI-Port. 1148 */ 1149 *reftag = 0xDEADBEEF; 1150 phba->lpfc_injerr_wref_cnt--; 1151 if (phba->lpfc_injerr_wref_cnt == 0) { 1152 phba->lpfc_injerr_nportid = 0; 1153 phba->lpfc_injerr_lba = 1154 LPFC_INJERR_LBA_OFF; 1155 memset(&phba->lpfc_injerr_wwpn, 1156 0, sizeof(struct lpfc_name)); 1157 } 1158 rc = BG_ERR_INIT; 1159 1160 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1161 "9077 BLKGRD: Injecting reftag error: " 1162 "write lba x%lx\n", (unsigned long)lba); 1163 break; 1164 } 1165 } 1166 if (phba->lpfc_injerr_rref_cnt) { 1167 switch (op) { 1168 case SCSI_PROT_READ_INSERT: 1169 case SCSI_PROT_READ_STRIP: 1170 case SCSI_PROT_READ_PASS: 1171 /* 1172 * For READ_STRIP and READ_PASS, force the 1173 * error on data being read off the wire. 
It 1174 * should force an IO error to the driver. 1175 */ 1176 *reftag = 0xDEADBEEF; 1177 phba->lpfc_injerr_rref_cnt--; 1178 if (phba->lpfc_injerr_rref_cnt == 0) { 1179 phba->lpfc_injerr_nportid = 0; 1180 phba->lpfc_injerr_lba = 1181 LPFC_INJERR_LBA_OFF; 1182 memset(&phba->lpfc_injerr_wwpn, 1183 0, sizeof(struct lpfc_name)); 1184 } 1185 rc = BG_ERR_INIT; 1186 1187 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1188 "9079 BLKGRD: Injecting reftag error: " 1189 "read lba x%lx\n", (unsigned long)lba); 1190 break; 1191 } 1192 } 1193 } 1194 1195 /* Should we change the Application Tag */ 1196 if (apptag) { 1197 if (phba->lpfc_injerr_wapp_cnt) { 1198 switch (op) { 1199 case SCSI_PROT_WRITE_PASS: 1200 if (src) { 1201 /* 1202 * For WRITE_PASS, force the error 1203 * to be sent on the wire. It should 1204 * be detected by the Target. 1205 * If blockoff != 0 error will be 1206 * inserted in middle of the IO. 1207 */ 1208 1209 lpfc_printf_log(phba, KERN_ERR, 1210 LOG_TRACE_EVENT, 1211 "9080 BLKGRD: Injecting apptag error: " 1212 "write lba x%lx + x%x oldappTag x%x\n", 1213 (unsigned long)lba, blockoff, 1214 be16_to_cpu(src->app_tag)); 1215 1216 /* 1217 * Save the old app_tag so we can 1218 * restore it on completion. 1219 */ 1220 if (lpfc_cmd) { 1221 lpfc_cmd->prot_data_type = 1222 LPFC_INJERR_APPTAG; 1223 lpfc_cmd->prot_data_segment = 1224 src; 1225 lpfc_cmd->prot_data = 1226 src->app_tag; 1227 } 1228 src->app_tag = cpu_to_be16(0xDEAD); 1229 phba->lpfc_injerr_wapp_cnt--; 1230 if (phba->lpfc_injerr_wapp_cnt == 0) { 1231 phba->lpfc_injerr_nportid = 0; 1232 phba->lpfc_injerr_lba = 1233 LPFC_INJERR_LBA_OFF; 1234 memset(&phba->lpfc_injerr_wwpn, 1235 0, sizeof(struct lpfc_name)); 1236 } 1237 rc = BG_ERR_TGT | BG_ERR_CHECK; 1238 break; 1239 } 1240 fallthrough; 1241 case SCSI_PROT_WRITE_INSERT: 1242 /* 1243 * For WRITE_INSERT, force the 1244 * error to be sent on the wire. It should be 1245 * detected by the Target. 1246 */ 1247 /* DEAD will be the apptag on the wire */ 1248 *apptag = 0xDEAD; 1249 phba->lpfc_injerr_wapp_cnt--; 1250 if (phba->lpfc_injerr_wapp_cnt == 0) { 1251 phba->lpfc_injerr_nportid = 0; 1252 phba->lpfc_injerr_lba = 1253 LPFC_INJERR_LBA_OFF; 1254 memset(&phba->lpfc_injerr_wwpn, 1255 0, sizeof(struct lpfc_name)); 1256 } 1257 rc = BG_ERR_TGT | BG_ERR_CHECK; 1258 1259 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1260 "0813 BLKGRD: Injecting apptag error: " 1261 "write lba x%lx\n", (unsigned long)lba); 1262 break; 1263 case SCSI_PROT_WRITE_STRIP: 1264 /* 1265 * For WRITE_STRIP and WRITE_PASS, 1266 * force the error on data 1267 * being copied from SLI-Host to SLI-Port. 1268 */ 1269 *apptag = 0xDEAD; 1270 phba->lpfc_injerr_wapp_cnt--; 1271 if (phba->lpfc_injerr_wapp_cnt == 0) { 1272 phba->lpfc_injerr_nportid = 0; 1273 phba->lpfc_injerr_lba = 1274 LPFC_INJERR_LBA_OFF; 1275 memset(&phba->lpfc_injerr_wwpn, 1276 0, sizeof(struct lpfc_name)); 1277 } 1278 rc = BG_ERR_INIT; 1279 1280 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1281 "0812 BLKGRD: Injecting apptag error: " 1282 "write lba x%lx\n", (unsigned long)lba); 1283 break; 1284 } 1285 } 1286 if (phba->lpfc_injerr_rapp_cnt) { 1287 switch (op) { 1288 case SCSI_PROT_READ_INSERT: 1289 case SCSI_PROT_READ_STRIP: 1290 case SCSI_PROT_READ_PASS: 1291 /* 1292 * For READ_STRIP and READ_PASS, force the 1293 * error on data being read off the wire. It 1294 * should force an IO error to the driver. 
1295 */ 1296 *apptag = 0xDEAD; 1297 phba->lpfc_injerr_rapp_cnt--; 1298 if (phba->lpfc_injerr_rapp_cnt == 0) { 1299 phba->lpfc_injerr_nportid = 0; 1300 phba->lpfc_injerr_lba = 1301 LPFC_INJERR_LBA_OFF; 1302 memset(&phba->lpfc_injerr_wwpn, 1303 0, sizeof(struct lpfc_name)); 1304 } 1305 rc = BG_ERR_INIT; 1306 1307 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1308 "0814 BLKGRD: Injecting apptag error: " 1309 "read lba x%lx\n", (unsigned long)lba); 1310 break; 1311 } 1312 } 1313 } 1314 1315 1316 /* Should we change the Guard Tag */ 1317 if (new_guard) { 1318 if (phba->lpfc_injerr_wgrd_cnt) { 1319 switch (op) { 1320 case SCSI_PROT_WRITE_PASS: 1321 rc = BG_ERR_CHECK; 1322 fallthrough; 1323 1324 case SCSI_PROT_WRITE_INSERT: 1325 /* 1326 * For WRITE_INSERT, force the 1327 * error to be sent on the wire. It should be 1328 * detected by the Target. 1329 */ 1330 phba->lpfc_injerr_wgrd_cnt--; 1331 if (phba->lpfc_injerr_wgrd_cnt == 0) { 1332 phba->lpfc_injerr_nportid = 0; 1333 phba->lpfc_injerr_lba = 1334 LPFC_INJERR_LBA_OFF; 1335 memset(&phba->lpfc_injerr_wwpn, 1336 0, sizeof(struct lpfc_name)); 1337 } 1338 1339 rc |= BG_ERR_TGT | BG_ERR_SWAP; 1340 /* Signals the caller to swap CRC->CSUM */ 1341 1342 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1343 "0817 BLKGRD: Injecting guard error: " 1344 "write lba x%lx\n", (unsigned long)lba); 1345 break; 1346 case SCSI_PROT_WRITE_STRIP: 1347 /* 1348 * For WRITE_STRIP and WRITE_PASS, 1349 * force the error on data 1350 * being copied from SLI-Host to SLI-Port. 1351 */ 1352 phba->lpfc_injerr_wgrd_cnt--; 1353 if (phba->lpfc_injerr_wgrd_cnt == 0) { 1354 phba->lpfc_injerr_nportid = 0; 1355 phba->lpfc_injerr_lba = 1356 LPFC_INJERR_LBA_OFF; 1357 memset(&phba->lpfc_injerr_wwpn, 1358 0, sizeof(struct lpfc_name)); 1359 } 1360 1361 rc = BG_ERR_INIT | BG_ERR_SWAP; 1362 /* Signals the caller to swap CRC->CSUM */ 1363 1364 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1365 "0816 BLKGRD: Injecting guard error: " 1366 "write lba x%lx\n", (unsigned long)lba); 1367 break; 1368 } 1369 } 1370 if (phba->lpfc_injerr_rgrd_cnt) { 1371 switch (op) { 1372 case SCSI_PROT_READ_INSERT: 1373 case SCSI_PROT_READ_STRIP: 1374 case SCSI_PROT_READ_PASS: 1375 /* 1376 * For READ_STRIP and READ_PASS, force the 1377 * error on data being read off the wire. It 1378 * should force an IO error to the driver. 1379 */ 1380 phba->lpfc_injerr_rgrd_cnt--; 1381 if (phba->lpfc_injerr_rgrd_cnt == 0) { 1382 phba->lpfc_injerr_nportid = 0; 1383 phba->lpfc_injerr_lba = 1384 LPFC_INJERR_LBA_OFF; 1385 memset(&phba->lpfc_injerr_wwpn, 1386 0, sizeof(struct lpfc_name)); 1387 } 1388 1389 rc = BG_ERR_INIT | BG_ERR_SWAP; 1390 /* Signals the caller to swap CRC->CSUM */ 1391 1392 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1393 "0818 BLKGRD: Injecting guard error: " 1394 "read lba x%lx\n", (unsigned long)lba); 1395 } 1396 } 1397 } 1398 1399 return rc; 1400 } 1401 #endif 1402 1403 /** 1404 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with 1405 * the specified SCSI command. 1406 * @phba: The Hba for which this call is being executed. 
1407 * @sc: The SCSI command to examine 1408 * @txop: (out) BlockGuard operation for transmitted data 1409 * @rxop: (out) BlockGuard operation for received data 1410 * 1411 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined 1412 * 1413 **/ 1414 static int 1415 lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1416 uint8_t *txop, uint8_t *rxop) 1417 { 1418 uint8_t ret = 0; 1419 1420 if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) { 1421 switch (scsi_get_prot_op(sc)) { 1422 case SCSI_PROT_READ_INSERT: 1423 case SCSI_PROT_WRITE_STRIP: 1424 *rxop = BG_OP_IN_NODIF_OUT_CSUM; 1425 *txop = BG_OP_IN_CSUM_OUT_NODIF; 1426 break; 1427 1428 case SCSI_PROT_READ_STRIP: 1429 case SCSI_PROT_WRITE_INSERT: 1430 *rxop = BG_OP_IN_CRC_OUT_NODIF; 1431 *txop = BG_OP_IN_NODIF_OUT_CRC; 1432 break; 1433 1434 case SCSI_PROT_READ_PASS: 1435 case SCSI_PROT_WRITE_PASS: 1436 *rxop = BG_OP_IN_CRC_OUT_CSUM; 1437 *txop = BG_OP_IN_CSUM_OUT_CRC; 1438 break; 1439 1440 case SCSI_PROT_NORMAL: 1441 default: 1442 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1443 "9063 BLKGRD: Bad op/guard:%d/IP combination\n", 1444 scsi_get_prot_op(sc)); 1445 ret = 1; 1446 break; 1447 1448 } 1449 } else { 1450 switch (scsi_get_prot_op(sc)) { 1451 case SCSI_PROT_READ_STRIP: 1452 case SCSI_PROT_WRITE_INSERT: 1453 *rxop = BG_OP_IN_CRC_OUT_NODIF; 1454 *txop = BG_OP_IN_NODIF_OUT_CRC; 1455 break; 1456 1457 case SCSI_PROT_READ_PASS: 1458 case SCSI_PROT_WRITE_PASS: 1459 *rxop = BG_OP_IN_CRC_OUT_CRC; 1460 *txop = BG_OP_IN_CRC_OUT_CRC; 1461 break; 1462 1463 case SCSI_PROT_READ_INSERT: 1464 case SCSI_PROT_WRITE_STRIP: 1465 *rxop = BG_OP_IN_NODIF_OUT_CRC; 1466 *txop = BG_OP_IN_CRC_OUT_NODIF; 1467 break; 1468 1469 case SCSI_PROT_NORMAL: 1470 default: 1471 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1472 "9075 BLKGRD: Bad op/guard:%d/CRC combination\n", 1473 scsi_get_prot_op(sc)); 1474 ret = 1; 1475 break; 1476 } 1477 } 1478 1479 return ret; 1480 } 1481 1482 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1483 /** 1484 * lpfc_bg_err_opcodes - reDetermine the BlockGuard opcodes to be used with 1485 * the specified SCSI command in order to force a guard tag error. 1486 * @phba: The Hba for which this call is being executed. 
1487 * @sc: The SCSI command to examine 1488 * @txop: (out) BlockGuard operation for transmitted data 1489 * @rxop: (out) BlockGuard operation for received data 1490 * 1491 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined 1492 * 1493 **/ 1494 static int 1495 lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1496 uint8_t *txop, uint8_t *rxop) 1497 { 1498 uint8_t ret = 0; 1499 1500 if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) { 1501 switch (scsi_get_prot_op(sc)) { 1502 case SCSI_PROT_READ_INSERT: 1503 case SCSI_PROT_WRITE_STRIP: 1504 *rxop = BG_OP_IN_NODIF_OUT_CRC; 1505 *txop = BG_OP_IN_CRC_OUT_NODIF; 1506 break; 1507 1508 case SCSI_PROT_READ_STRIP: 1509 case SCSI_PROT_WRITE_INSERT: 1510 *rxop = BG_OP_IN_CSUM_OUT_NODIF; 1511 *txop = BG_OP_IN_NODIF_OUT_CSUM; 1512 break; 1513 1514 case SCSI_PROT_READ_PASS: 1515 case SCSI_PROT_WRITE_PASS: 1516 *rxop = BG_OP_IN_CSUM_OUT_CRC; 1517 *txop = BG_OP_IN_CRC_OUT_CSUM; 1518 break; 1519 1520 case SCSI_PROT_NORMAL: 1521 default: 1522 break; 1523 1524 } 1525 } else { 1526 switch (scsi_get_prot_op(sc)) { 1527 case SCSI_PROT_READ_STRIP: 1528 case SCSI_PROT_WRITE_INSERT: 1529 *rxop = BG_OP_IN_CSUM_OUT_NODIF; 1530 *txop = BG_OP_IN_NODIF_OUT_CSUM; 1531 break; 1532 1533 case SCSI_PROT_READ_PASS: 1534 case SCSI_PROT_WRITE_PASS: 1535 *rxop = BG_OP_IN_CSUM_OUT_CSUM; 1536 *txop = BG_OP_IN_CSUM_OUT_CSUM; 1537 break; 1538 1539 case SCSI_PROT_READ_INSERT: 1540 case SCSI_PROT_WRITE_STRIP: 1541 *rxop = BG_OP_IN_NODIF_OUT_CSUM; 1542 *txop = BG_OP_IN_CSUM_OUT_NODIF; 1543 break; 1544 1545 case SCSI_PROT_NORMAL: 1546 default: 1547 break; 1548 } 1549 } 1550 1551 return ret; 1552 } 1553 #endif 1554 1555 /** 1556 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data 1557 * @phba: The Hba for which this call is being executed. 1558 * @sc: pointer to scsi command we're working on 1559 * @bpl: pointer to buffer list for protection groups 1560 * @datasegcnt: number of segments of data that have been dma mapped 1561 * 1562 * This function sets up BPL buffer list for protection groups of 1563 * type LPFC_PG_TYPE_NO_DIF 1564 * 1565 * This is usually used when the HBA is instructed to generate 1566 * DIFs and insert them into data stream (or strip DIF from 1567 * incoming data stream) 1568 * 1569 * The buffer list consists of just one protection group described 1570 * below: 1571 * +-------------------------+ 1572 * start of prot group --> | PDE_5 | 1573 * +-------------------------+ 1574 * | PDE_6 | 1575 * +-------------------------+ 1576 * | Data BDE | 1577 * +-------------------------+ 1578 * |more Data BDE's ... (opt)| 1579 * +-------------------------+ 1580 * 1581 * 1582 * Note: Data s/g buffers have been dma mapped 1583 * 1584 * Returns the number of BDEs added to the BPL. 
1585 **/ 1586 static int 1587 lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1588 struct ulp_bde64 *bpl, int datasegcnt) 1589 { 1590 struct scatterlist *sgde = NULL; /* s/g data entry */ 1591 struct lpfc_pde5 *pde5 = NULL; 1592 struct lpfc_pde6 *pde6 = NULL; 1593 dma_addr_t physaddr; 1594 int i = 0, num_bde = 0, status; 1595 int datadir = sc->sc_data_direction; 1596 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1597 uint32_t rc; 1598 #endif 1599 uint32_t checking = 1; 1600 uint32_t reftag; 1601 uint8_t txop, rxop; 1602 1603 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); 1604 if (status) 1605 goto out; 1606 1607 /* extract some info from the scsi command for pde*/ 1608 reftag = scsi_prot_ref_tag(sc); 1609 if (reftag == LPFC_INVALID_REFTAG) 1610 goto out; 1611 1612 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1613 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); 1614 if (rc) { 1615 if (rc & BG_ERR_SWAP) 1616 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); 1617 if (rc & BG_ERR_CHECK) 1618 checking = 0; 1619 } 1620 #endif 1621 1622 /* setup PDE5 with what we have */ 1623 pde5 = (struct lpfc_pde5 *) bpl; 1624 memset(pde5, 0, sizeof(struct lpfc_pde5)); 1625 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); 1626 1627 /* Endianness conversion if necessary for PDE5 */ 1628 pde5->word0 = cpu_to_le32(pde5->word0); 1629 pde5->reftag = cpu_to_le32(reftag); 1630 1631 /* advance bpl and increment bde count */ 1632 num_bde++; 1633 bpl++; 1634 pde6 = (struct lpfc_pde6 *) bpl; 1635 1636 /* setup PDE6 with the rest of the info */ 1637 memset(pde6, 0, sizeof(struct lpfc_pde6)); 1638 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); 1639 bf_set(pde6_optx, pde6, txop); 1640 bf_set(pde6_oprx, pde6, rxop); 1641 1642 /* 1643 * We only need to check the data on READs, for WRITEs 1644 * protection data is automatically generated, not checked. 1645 */ 1646 if (datadir == DMA_FROM_DEVICE) { 1647 if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) 1648 bf_set(pde6_ce, pde6, checking); 1649 else 1650 bf_set(pde6_ce, pde6, 0); 1651 1652 if (sc->prot_flags & SCSI_PROT_REF_CHECK) 1653 bf_set(pde6_re, pde6, checking); 1654 else 1655 bf_set(pde6_re, pde6, 0); 1656 } 1657 bf_set(pde6_ai, pde6, 1); 1658 bf_set(pde6_ae, pde6, 0); 1659 bf_set(pde6_apptagval, pde6, 0); 1660 1661 /* Endianness conversion if necessary for PDE6 */ 1662 pde6->word0 = cpu_to_le32(pde6->word0); 1663 pde6->word1 = cpu_to_le32(pde6->word1); 1664 pde6->word2 = cpu_to_le32(pde6->word2); 1665 1666 /* advance bpl and increment bde count */ 1667 num_bde++; 1668 bpl++; 1669 1670 /* assumption: caller has already run dma_map_sg on command data */ 1671 scsi_for_each_sg(sc, sgde, datasegcnt, i) { 1672 physaddr = sg_dma_address(sgde); 1673 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr)); 1674 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); 1675 bpl->tus.f.bdeSize = sg_dma_len(sgde); 1676 if (datadir == DMA_TO_DEVICE) 1677 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1678 else 1679 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; 1680 bpl->tus.w = le32_to_cpu(bpl->tus.w); 1681 bpl++; 1682 num_bde++; 1683 } 1684 1685 out: 1686 return num_bde; 1687 } 1688 1689 /** 1690 * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data 1691 * @phba: The Hba for which this call is being executed. 
1692 * @sc: pointer to scsi command we're working on 1693 * @bpl: pointer to buffer list for protection groups 1694 * @datacnt: number of segments of data that have been dma mapped 1695 * @protcnt: number of segment of protection data that have been dma mapped 1696 * 1697 * This function sets up BPL buffer list for protection groups of 1698 * type LPFC_PG_TYPE_DIF 1699 * 1700 * This is usually used when DIFs are in their own buffers, 1701 * separate from the data. The HBA can then by instructed 1702 * to place the DIFs in the outgoing stream. For read operations, 1703 * The HBA could extract the DIFs and place it in DIF buffers. 1704 * 1705 * The buffer list for this type consists of one or more of the 1706 * protection groups described below: 1707 * +-------------------------+ 1708 * start of first prot group --> | PDE_5 | 1709 * +-------------------------+ 1710 * | PDE_6 | 1711 * +-------------------------+ 1712 * | PDE_7 (Prot BDE) | 1713 * +-------------------------+ 1714 * | Data BDE | 1715 * +-------------------------+ 1716 * |more Data BDE's ... (opt)| 1717 * +-------------------------+ 1718 * start of new prot group --> | PDE_5 | 1719 * +-------------------------+ 1720 * | ... | 1721 * +-------------------------+ 1722 * 1723 * Note: It is assumed that both data and protection s/g buffers have been 1724 * mapped for DMA 1725 * 1726 * Returns the number of BDEs added to the BPL. 1727 **/ 1728 static int 1729 lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1730 struct ulp_bde64 *bpl, int datacnt, int protcnt) 1731 { 1732 struct scatterlist *sgde = NULL; /* s/g data entry */ 1733 struct scatterlist *sgpe = NULL; /* s/g prot entry */ 1734 struct lpfc_pde5 *pde5 = NULL; 1735 struct lpfc_pde6 *pde6 = NULL; 1736 struct lpfc_pde7 *pde7 = NULL; 1737 dma_addr_t dataphysaddr, protphysaddr; 1738 unsigned short curr_data = 0, curr_prot = 0; 1739 unsigned int split_offset; 1740 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder; 1741 unsigned int protgrp_blks, protgrp_bytes; 1742 unsigned int remainder, subtotal; 1743 int status; 1744 int datadir = sc->sc_data_direction; 1745 unsigned char pgdone = 0, alldone = 0; 1746 unsigned blksize; 1747 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1748 uint32_t rc; 1749 #endif 1750 uint32_t checking = 1; 1751 uint32_t reftag; 1752 uint8_t txop, rxop; 1753 int num_bde = 0; 1754 1755 sgpe = scsi_prot_sglist(sc); 1756 sgde = scsi_sglist(sc); 1757 1758 if (!sgpe || !sgde) { 1759 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1760 "9020 Invalid s/g entry: data=x%px prot=x%px\n", 1761 sgpe, sgde); 1762 return 0; 1763 } 1764 1765 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); 1766 if (status) 1767 goto out; 1768 1769 /* extract some info from the scsi command */ 1770 blksize = scsi_prot_interval(sc); 1771 reftag = scsi_prot_ref_tag(sc); 1772 if (reftag == LPFC_INVALID_REFTAG) 1773 goto out; 1774 1775 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1776 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); 1777 if (rc) { 1778 if (rc & BG_ERR_SWAP) 1779 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); 1780 if (rc & BG_ERR_CHECK) 1781 checking = 0; 1782 } 1783 #endif 1784 1785 split_offset = 0; 1786 do { 1787 /* Check to see if we ran out of space */ 1788 if (num_bde >= (phba->cfg_total_seg_cnt - 2)) 1789 return num_bde + 3; 1790 1791 /* setup PDE5 with what we have */ 1792 pde5 = (struct lpfc_pde5 *) bpl; 1793 memset(pde5, 0, sizeof(struct lpfc_pde5)); 1794 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); 1795 1796 /* Endianness conversion if 
necessary for PDE5 */ 1797 pde5->word0 = cpu_to_le32(pde5->word0); 1798 pde5->reftag = cpu_to_le32(reftag); 1799 1800 /* advance bpl and increment bde count */ 1801 num_bde++; 1802 bpl++; 1803 pde6 = (struct lpfc_pde6 *) bpl; 1804 1805 /* setup PDE6 with the rest of the info */ 1806 memset(pde6, 0, sizeof(struct lpfc_pde6)); 1807 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); 1808 bf_set(pde6_optx, pde6, txop); 1809 bf_set(pde6_oprx, pde6, rxop); 1810 1811 if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) 1812 bf_set(pde6_ce, pde6, checking); 1813 else 1814 bf_set(pde6_ce, pde6, 0); 1815 1816 if (sc->prot_flags & SCSI_PROT_REF_CHECK) 1817 bf_set(pde6_re, pde6, checking); 1818 else 1819 bf_set(pde6_re, pde6, 0); 1820 1821 bf_set(pde6_ai, pde6, 1); 1822 bf_set(pde6_ae, pde6, 0); 1823 bf_set(pde6_apptagval, pde6, 0); 1824 1825 /* Endianness conversion if necessary for PDE6 */ 1826 pde6->word0 = cpu_to_le32(pde6->word0); 1827 pde6->word1 = cpu_to_le32(pde6->word1); 1828 pde6->word2 = cpu_to_le32(pde6->word2); 1829 1830 /* advance bpl and increment bde count */ 1831 num_bde++; 1832 bpl++; 1833 1834 /* setup the first BDE that points to protection buffer */ 1835 protphysaddr = sg_dma_address(sgpe) + protgroup_offset; 1836 protgroup_len = sg_dma_len(sgpe) - protgroup_offset; 1837 1838 /* must be integer multiple of the DIF block length */ 1839 BUG_ON(protgroup_len % 8); 1840 1841 pde7 = (struct lpfc_pde7 *) bpl; 1842 memset(pde7, 0, sizeof(struct lpfc_pde7)); 1843 bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR); 1844 1845 pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr)); 1846 pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr)); 1847 1848 protgrp_blks = protgroup_len / 8; 1849 protgrp_bytes = protgrp_blks * blksize; 1850 1851 /* check if this pde is crossing the 4K boundary; if so split */ 1852 if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) { 1853 protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff); 1854 protgroup_offset += protgroup_remainder; 1855 protgrp_blks = protgroup_remainder / 8; 1856 protgrp_bytes = protgrp_blks * blksize; 1857 } else { 1858 protgroup_offset = 0; 1859 curr_prot++; 1860 } 1861 1862 num_bde++; 1863 1864 /* setup BDE's for data blocks associated with DIF data */ 1865 pgdone = 0; 1866 subtotal = 0; /* total bytes processed for current prot grp */ 1867 while (!pgdone) { 1868 /* Check to see if we ran out of space */ 1869 if (num_bde >= phba->cfg_total_seg_cnt) 1870 return num_bde + 1; 1871 1872 if (!sgde) { 1873 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1874 "9065 BLKGRD:%s Invalid data segment\n", 1875 __func__); 1876 return 0; 1877 } 1878 bpl++; 1879 dataphysaddr = sg_dma_address(sgde) + split_offset; 1880 bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr)); 1881 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr)); 1882 1883 remainder = sg_dma_len(sgde) - split_offset; 1884 1885 if ((subtotal + remainder) <= protgrp_bytes) { 1886 /* we can use this whole buffer */ 1887 bpl->tus.f.bdeSize = remainder; 1888 split_offset = 0; 1889 1890 if ((subtotal + remainder) == protgrp_bytes) 1891 pgdone = 1; 1892 } else { 1893 /* must split this buffer with next prot grp */ 1894 bpl->tus.f.bdeSize = protgrp_bytes - subtotal; 1895 split_offset += bpl->tus.f.bdeSize; 1896 } 1897 1898 subtotal += bpl->tus.f.bdeSize; 1899 1900 if (datadir == DMA_TO_DEVICE) 1901 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1902 else 1903 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; 1904 bpl->tus.w = le32_to_cpu(bpl->tus.w); 1905 1906 num_bde++; 1907 curr_data++; 1908 1909 if (split_offset) 
1910 break; 1911 1912 /* Move to the next s/g segment if possible */ 1913 sgde = sg_next(sgde); 1914 1915 } 1916 1917 if (protgroup_offset) { 1918 /* update the reference tag */ 1919 reftag += protgrp_blks; 1920 bpl++; 1921 continue; 1922 } 1923 1924 /* are we done ? */ 1925 if (curr_prot == protcnt) { 1926 alldone = 1; 1927 } else if (curr_prot < protcnt) { 1928 /* advance to next prot buffer */ 1929 sgpe = sg_next(sgpe); 1930 bpl++; 1931 1932 /* update the reference tag */ 1933 reftag += protgrp_blks; 1934 } else { 1935 /* if we're here, we have a bug */ 1936 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1937 "9054 BLKGRD: bug in %s\n", __func__); 1938 } 1939 1940 } while (!alldone); 1941 out: 1942 1943 return num_bde; 1944 } 1945 1946 /** 1947 * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data 1948 * @phba: The Hba for which this call is being executed. 1949 * @sc: pointer to scsi command we're working on 1950 * @sgl: pointer to buffer list for protection groups 1951 * @datasegcnt: number of segments of data that have been dma mapped 1952 * @lpfc_cmd: lpfc scsi command object pointer. 1953 * 1954 * This function sets up SGL buffer list for protection groups of 1955 * type LPFC_PG_TYPE_NO_DIF 1956 * 1957 * This is usually used when the HBA is instructed to generate 1958 * DIFs and insert them into data stream (or strip DIF from 1959 * incoming data stream) 1960 * 1961 * The buffer list consists of just one protection group described 1962 * below: 1963 * +-------------------------+ 1964 * start of prot group --> | DI_SEED | 1965 * +-------------------------+ 1966 * | Data SGE | 1967 * +-------------------------+ 1968 * |more Data SGE's ... (opt)| 1969 * +-------------------------+ 1970 * 1971 * 1972 * Note: Data s/g buffers have been dma mapped 1973 * 1974 * Returns the number of SGEs added to the SGL. 1975 **/ 1976 static int 1977 lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1978 struct sli4_sge *sgl, int datasegcnt, 1979 struct lpfc_io_buf *lpfc_cmd) 1980 { 1981 struct scatterlist *sgde = NULL; /* s/g data entry */ 1982 struct sli4_sge_diseed *diseed = NULL; 1983 dma_addr_t physaddr; 1984 int i = 0, num_sge = 0, status; 1985 uint32_t reftag; 1986 uint8_t txop, rxop; 1987 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1988 uint32_t rc; 1989 #endif 1990 uint32_t checking = 1; 1991 uint32_t dma_len; 1992 uint32_t dma_offset = 0; 1993 struct sli4_hybrid_sgl *sgl_xtra = NULL; 1994 int j; 1995 bool lsp_just_set = false; 1996 1997 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); 1998 if (status) 1999 goto out; 2000 2001 /* extract some info from the scsi command for pde*/ 2002 reftag = scsi_prot_ref_tag(sc); 2003 if (reftag == LPFC_INVALID_REFTAG) 2004 goto out; 2005 2006 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 2007 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); 2008 if (rc) { 2009 if (rc & BG_ERR_SWAP) 2010 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); 2011 if (rc & BG_ERR_CHECK) 2012 checking = 0; 2013 } 2014 #endif 2015 2016 /* setup DISEED with what we have */ 2017 diseed = (struct sli4_sge_diseed *) sgl; 2018 memset(diseed, 0, sizeof(struct sli4_sge_diseed)); 2019 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED); 2020 2021 /* Endianness conversion if necessary */ 2022 diseed->ref_tag = cpu_to_le32(reftag); 2023 diseed->ref_tag_tran = diseed->ref_tag; 2024 2025 /* 2026 * We only need to check the data on READs, for WRITEs 2027 * protection data is automatically generated, not checked. 
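 * (Only the DMA_FROM_DEVICE case below arms the guard and reference
 * checks in the DISEED SGE; for writes those check bits are left clear.)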
2028 */ 2029 if (sc->sc_data_direction == DMA_FROM_DEVICE) { 2030 if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) 2031 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); 2032 else 2033 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); 2034 2035 if (sc->prot_flags & SCSI_PROT_REF_CHECK) 2036 bf_set(lpfc_sli4_sge_dif_re, diseed, checking); 2037 else 2038 bf_set(lpfc_sli4_sge_dif_re, diseed, 0); 2039 } 2040 2041 /* setup DISEED with the rest of the info */ 2042 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); 2043 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); 2044 2045 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); 2046 bf_set(lpfc_sli4_sge_dif_me, diseed, 0); 2047 2048 /* Endianness conversion if necessary for DISEED */ 2049 diseed->word2 = cpu_to_le32(diseed->word2); 2050 diseed->word3 = cpu_to_le32(diseed->word3); 2051 2052 /* advance bpl and increment sge count */ 2053 num_sge++; 2054 sgl++; 2055 2056 /* assumption: caller has already run dma_map_sg on command data */ 2057 sgde = scsi_sglist(sc); 2058 j = 3; 2059 for (i = 0; i < datasegcnt; i++) { 2060 /* clear it */ 2061 sgl->word2 = 0; 2062 2063 /* do we need to expand the segment */ 2064 if (!lsp_just_set && !((j + 1) % phba->border_sge_num) && 2065 ((datasegcnt - 1) != i)) { 2066 /* set LSP type */ 2067 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP); 2068 2069 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd); 2070 2071 if (unlikely(!sgl_xtra)) { 2072 lpfc_cmd->seg_cnt = 0; 2073 return 0; 2074 } 2075 sgl->addr_lo = cpu_to_le32(putPaddrLow( 2076 sgl_xtra->dma_phys_sgl)); 2077 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 2078 sgl_xtra->dma_phys_sgl)); 2079 2080 } else { 2081 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); 2082 } 2083 2084 if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) { 2085 if ((datasegcnt - 1) == i) 2086 bf_set(lpfc_sli4_sge_last, sgl, 1); 2087 physaddr = sg_dma_address(sgde); 2088 dma_len = sg_dma_len(sgde); 2089 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); 2090 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); 2091 2092 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 2093 sgl->word2 = cpu_to_le32(sgl->word2); 2094 sgl->sge_len = cpu_to_le32(dma_len); 2095 2096 dma_offset += dma_len; 2097 sgde = sg_next(sgde); 2098 2099 sgl++; 2100 num_sge++; 2101 lsp_just_set = false; 2102 2103 } else { 2104 sgl->word2 = cpu_to_le32(sgl->word2); 2105 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size); 2106 2107 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 2108 i = i - 1; 2109 2110 lsp_just_set = true; 2111 } 2112 2113 j++; 2114 2115 } 2116 2117 out: 2118 return num_sge; 2119 } 2120 2121 /** 2122 * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data 2123 * @phba: The Hba for which this call is being executed. 2124 * @sc: pointer to scsi command we're working on 2125 * @sgl: pointer to buffer list for protection groups 2126 * @datacnt: number of segments of data that have been dma mapped 2127 * @protcnt: number of segment of protection data that have been dma mapped 2128 * @lpfc_cmd: lpfc scsi command object pointer. 2129 * 2130 * This function sets up SGL buffer list for protection groups of 2131 * type LPFC_PG_TYPE_DIF 2132 * 2133 * This is usually used when DIFs are in their own buffers, 2134 * separate from the data. The HBA can then by instructed 2135 * to place the DIFs in the outgoing stream. For read operations, 2136 * The HBA could extract the DIFs and place it in DIF buffers. 
2137 * 2138 * The buffer list for this type consists of one or more of the 2139 * protection groups described below: 2140 * +-------------------------+ 2141 * start of first prot group --> | DISEED | 2142 * +-------------------------+ 2143 * | DIF (Prot SGE) | 2144 * +-------------------------+ 2145 * | Data SGE | 2146 * +-------------------------+ 2147 * |more Data SGE's ... (opt)| 2148 * +-------------------------+ 2149 * start of new prot group --> | DISEED | 2150 * +-------------------------+ 2151 * | ... | 2152 * +-------------------------+ 2153 * 2154 * Note: It is assumed that both data and protection s/g buffers have been 2155 * mapped for DMA 2156 * 2157 * Returns the number of SGEs added to the SGL. 2158 **/ 2159 static int 2160 lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, 2161 struct sli4_sge *sgl, int datacnt, int protcnt, 2162 struct lpfc_io_buf *lpfc_cmd) 2163 { 2164 struct scatterlist *sgde = NULL; /* s/g data entry */ 2165 struct scatterlist *sgpe = NULL; /* s/g prot entry */ 2166 struct sli4_sge_diseed *diseed = NULL; 2167 dma_addr_t dataphysaddr, protphysaddr; 2168 unsigned short curr_data = 0, curr_prot = 0; 2169 unsigned int split_offset; 2170 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder; 2171 unsigned int protgrp_blks, protgrp_bytes; 2172 unsigned int remainder, subtotal; 2173 int status; 2174 unsigned char pgdone = 0, alldone = 0; 2175 unsigned blksize; 2176 uint32_t reftag; 2177 uint8_t txop, rxop; 2178 uint32_t dma_len; 2179 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 2180 uint32_t rc; 2181 #endif 2182 uint32_t checking = 1; 2183 uint32_t dma_offset = 0; 2184 int num_sge = 0, j = 2; 2185 struct sli4_hybrid_sgl *sgl_xtra = NULL; 2186 2187 sgpe = scsi_prot_sglist(sc); 2188 sgde = scsi_sglist(sc); 2189 2190 if (!sgpe || !sgde) { 2191 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2192 "9082 Invalid s/g entry: data=x%px prot=x%px\n", 2193 sgpe, sgde); 2194 return 0; 2195 } 2196 2197 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); 2198 if (status) 2199 goto out; 2200 2201 /* extract some info from the scsi command */ 2202 blksize = scsi_prot_interval(sc); 2203 reftag = scsi_prot_ref_tag(sc); 2204 if (reftag == LPFC_INVALID_REFTAG) 2205 goto out; 2206 2207 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 2208 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); 2209 if (rc) { 2210 if (rc & BG_ERR_SWAP) 2211 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); 2212 if (rc & BG_ERR_CHECK) 2213 checking = 0; 2214 } 2215 #endif 2216 2217 split_offset = 0; 2218 do { 2219 /* Check to see if we ran out of space */ 2220 if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) && 2221 !(phba->cfg_xpsgl)) 2222 return num_sge + 3; 2223 2224 /* DISEED and DIF have to be together */ 2225 if (!((j + 1) % phba->border_sge_num) || 2226 !((j + 2) % phba->border_sge_num) || 2227 !((j + 3) % phba->border_sge_num)) { 2228 sgl->word2 = 0; 2229 2230 /* set LSP type */ 2231 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP); 2232 2233 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd); 2234 2235 if (unlikely(!sgl_xtra)) { 2236 goto out; 2237 } else { 2238 sgl->addr_lo = cpu_to_le32(putPaddrLow( 2239 sgl_xtra->dma_phys_sgl)); 2240 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 2241 sgl_xtra->dma_phys_sgl)); 2242 } 2243 2244 sgl->word2 = cpu_to_le32(sgl->word2); 2245 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size); 2246 2247 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 2248 j = 0; 2249 } 2250 2251 /* setup DISEED with what we have */ 2252 diseed = (struct sli4_sge_diseed *) sgl; 2253 
memset(diseed, 0, sizeof(struct sli4_sge_diseed)); 2254 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED); 2255 2256 /* Endianness conversion if necessary */ 2257 diseed->ref_tag = cpu_to_le32(reftag); 2258 diseed->ref_tag_tran = diseed->ref_tag; 2259 2260 if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) { 2261 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); 2262 } else { 2263 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); 2264 /* 2265 * When in this mode, the hardware will replace 2266 * the guard tag from the host with a 2267 * newly generated good CRC for the wire. 2268 * Switch to raw mode here to avoid this 2269 * behavior. What the host sends gets put on the wire. 2270 */ 2271 if (txop == BG_OP_IN_CRC_OUT_CRC) { 2272 txop = BG_OP_RAW_MODE; 2273 rxop = BG_OP_RAW_MODE; 2274 } 2275 } 2276 2277 2278 if (sc->prot_flags & SCSI_PROT_REF_CHECK) 2279 bf_set(lpfc_sli4_sge_dif_re, diseed, checking); 2280 else 2281 bf_set(lpfc_sli4_sge_dif_re, diseed, 0); 2282 2283 /* setup DISEED with the rest of the info */ 2284 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); 2285 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); 2286 2287 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); 2288 bf_set(lpfc_sli4_sge_dif_me, diseed, 0); 2289 2290 /* Endianness conversion if necessary for DISEED */ 2291 diseed->word2 = cpu_to_le32(diseed->word2); 2292 diseed->word3 = cpu_to_le32(diseed->word3); 2293 2294 /* advance sgl and increment bde count */ 2295 num_sge++; 2296 2297 sgl++; 2298 j++; 2299 2300 /* setup the first BDE that points to protection buffer */ 2301 protphysaddr = sg_dma_address(sgpe) + protgroup_offset; 2302 protgroup_len = sg_dma_len(sgpe) - protgroup_offset; 2303 2304 /* must be integer multiple of the DIF block length */ 2305 BUG_ON(protgroup_len % 8); 2306 2307 /* Now setup DIF SGE */ 2308 sgl->word2 = 0; 2309 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF); 2310 sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr)); 2311 sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr)); 2312 sgl->word2 = cpu_to_le32(sgl->word2); 2313 sgl->sge_len = 0; 2314 2315 protgrp_blks = protgroup_len / 8; 2316 protgrp_bytes = protgrp_blks * blksize; 2317 2318 /* check if DIF SGE is crossing the 4K boundary; if so split */ 2319 if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) { 2320 protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff); 2321 protgroup_offset += protgroup_remainder; 2322 protgrp_blks = protgroup_remainder / 8; 2323 protgrp_bytes = protgrp_blks * blksize; 2324 } else { 2325 protgroup_offset = 0; 2326 curr_prot++; 2327 } 2328 2329 num_sge++; 2330 2331 /* setup SGE's for data blocks associated with DIF data */ 2332 pgdone = 0; 2333 subtotal = 0; /* total bytes processed for current prot grp */ 2334 2335 sgl++; 2336 j++; 2337 2338 while (!pgdone) { 2339 /* Check to see if we ran out of space */ 2340 if ((num_sge >= phba->cfg_total_seg_cnt) && 2341 !phba->cfg_xpsgl) 2342 return num_sge + 1; 2343 2344 if (!sgde) { 2345 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2346 "9086 BLKGRD:%s Invalid data segment\n", 2347 __func__); 2348 return 0; 2349 } 2350 2351 if (!((j + 1) % phba->border_sge_num)) { 2352 sgl->word2 = 0; 2353 2354 /* set LSP type */ 2355 bf_set(lpfc_sli4_sge_type, sgl, 2356 LPFC_SGE_TYPE_LSP); 2357 2358 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, 2359 lpfc_cmd); 2360 2361 if (unlikely(!sgl_xtra)) { 2362 goto out; 2363 } else { 2364 sgl->addr_lo = cpu_to_le32( 2365 putPaddrLow(sgl_xtra->dma_phys_sgl)); 2366 sgl->addr_hi = cpu_to_le32( 2367 putPaddrHigh(sgl_xtra->dma_phys_sgl)); 2368 } 2369 2370 sgl->word2 = 
cpu_to_le32(sgl->word2); 2371 sgl->sge_len = cpu_to_le32( 2372 phba->cfg_sg_dma_buf_size); 2373 2374 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 2375 } else { 2376 dataphysaddr = sg_dma_address(sgde) + 2377 split_offset; 2378 2379 remainder = sg_dma_len(sgde) - split_offset; 2380 2381 if ((subtotal + remainder) <= protgrp_bytes) { 2382 /* we can use this whole buffer */ 2383 dma_len = remainder; 2384 split_offset = 0; 2385 2386 if ((subtotal + remainder) == 2387 protgrp_bytes) 2388 pgdone = 1; 2389 } else { 2390 /* must split this buffer with next 2391 * prot grp 2392 */ 2393 dma_len = protgrp_bytes - subtotal; 2394 split_offset += dma_len; 2395 } 2396 2397 subtotal += dma_len; 2398 2399 sgl->word2 = 0; 2400 sgl->addr_lo = cpu_to_le32(putPaddrLow( 2401 dataphysaddr)); 2402 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 2403 dataphysaddr)); 2404 bf_set(lpfc_sli4_sge_last, sgl, 0); 2405 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 2406 bf_set(lpfc_sli4_sge_type, sgl, 2407 LPFC_SGE_TYPE_DATA); 2408 2409 sgl->sge_len = cpu_to_le32(dma_len); 2410 dma_offset += dma_len; 2411 2412 num_sge++; 2413 curr_data++; 2414 2415 if (split_offset) { 2416 sgl++; 2417 j++; 2418 break; 2419 } 2420 2421 /* Move to the next s/g segment if possible */ 2422 sgde = sg_next(sgde); 2423 2424 sgl++; 2425 } 2426 2427 j++; 2428 } 2429 2430 if (protgroup_offset) { 2431 /* update the reference tag */ 2432 reftag += protgrp_blks; 2433 continue; 2434 } 2435 2436 /* are we done ? */ 2437 if (curr_prot == protcnt) { 2438 /* mark the last SGL */ 2439 sgl--; 2440 bf_set(lpfc_sli4_sge_last, sgl, 1); 2441 alldone = 1; 2442 } else if (curr_prot < protcnt) { 2443 /* advance to next prot buffer */ 2444 sgpe = sg_next(sgpe); 2445 2446 /* update the reference tag */ 2447 reftag += protgrp_blks; 2448 } else { 2449 /* if we're here, we have a bug */ 2450 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2451 "9085 BLKGRD: bug in %s\n", __func__); 2452 } 2453 2454 } while (!alldone); 2455 2456 out: 2457 2458 return num_sge; 2459 } 2460 2461 /** 2462 * lpfc_prot_group_type - Get prtotection group type of SCSI command 2463 * @phba: The Hba for which this call is being executed. 2464 * @sc: pointer to scsi command we're working on 2465 * 2466 * Given a SCSI command that supports DIF, determine composition of protection 2467 * groups involved in setting up buffer lists 2468 * 2469 * Returns: Protection group type (with or without DIF) 2470 * 2471 **/ 2472 static int 2473 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc) 2474 { 2475 int ret = LPFC_PG_TYPE_INVALID; 2476 unsigned char op = scsi_get_prot_op(sc); 2477 2478 switch (op) { 2479 case SCSI_PROT_READ_STRIP: 2480 case SCSI_PROT_WRITE_INSERT: 2481 ret = LPFC_PG_TYPE_NO_DIF; 2482 break; 2483 case SCSI_PROT_READ_INSERT: 2484 case SCSI_PROT_WRITE_STRIP: 2485 case SCSI_PROT_READ_PASS: 2486 case SCSI_PROT_WRITE_PASS: 2487 ret = LPFC_PG_TYPE_DIF_BUF; 2488 break; 2489 default: 2490 if (phba) 2491 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2492 "9021 Unsupported protection op:%d\n", 2493 op); 2494 break; 2495 } 2496 return ret; 2497 } 2498 2499 /** 2500 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard 2501 * @phba: The Hba for which this call is being executed. 2502 * @lpfc_cmd: The scsi buffer which is going to be adjusted. 2503 * 2504 * Adjust the data length to account for how much data 2505 * is actually on the wire. 
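 * For example (illustrative values, not tied to any particular request):
 * with a 512-byte protection interval, a 32768-byte transfer carries
 * 64 extra 8-byte DIF tuples, so the adjusted length is 33280 bytes.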
2506 * 2507 * returns the adjusted data length 2508 **/ 2509 static int 2510 lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba, 2511 struct lpfc_io_buf *lpfc_cmd) 2512 { 2513 struct scsi_cmnd *sc = lpfc_cmd->pCmd; 2514 int fcpdl; 2515 2516 fcpdl = scsi_bufflen(sc); 2517 2518 /* Check if there is protection data on the wire */ 2519 if (sc->sc_data_direction == DMA_FROM_DEVICE) { 2520 /* Read check for protection data */ 2521 if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT) 2522 return fcpdl; 2523 2524 } else { 2525 /* Write check for protection data */ 2526 if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP) 2527 return fcpdl; 2528 } 2529 2530 /* 2531 * If we are in DIF Type 1 mode every data block has an 8 byte 2532 * DIF (trailer) attached to it. Must adjust FCP data length 2533 * to account for the protection data. 2534 */ 2535 fcpdl += (fcpdl / scsi_prot_interval(sc)) * 8; 2536 2537 return fcpdl; 2538 } 2539 2540 /** 2541 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec 2542 * @phba: The Hba for which this call is being executed. 2543 * @lpfc_cmd: The scsi buffer which is going to be prep'ed. 2544 * 2545 * This is the protection/DIF aware version of 2546 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the 2547 * two functions eventually, but for now, it's here. 2548 * RETURNS 0 - SUCCESS, 2549 * 1 - Failed DMA map, retry. 2550 * 2 - Invalid scsi cmd or prot-type. Do not retry. 2551 **/ 2552 static int 2553 lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, 2554 struct lpfc_io_buf *lpfc_cmd) 2555 { 2556 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 2557 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 2558 struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl; 2559 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 2560 uint32_t num_bde = 0; 2561 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; 2562 int prot_group_type = 0; 2563 int fcpdl; 2564 int ret = 1; 2565 struct lpfc_vport *vport = phba->pport; 2566 2567 /* 2568 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd 2569 * and fcp_rsp regions to the first data bde entry 2570 */ 2571 bpl += 2; 2572 if (scsi_sg_count(scsi_cmnd)) { 2573 /* 2574 * The driver stores the segment count returned from pci_map_sg 2575 * because this is a count of dma-mappings used to map the use_sg 2576 * pages. They are not guaranteed to be the same for those 2577 * architectures that implement an IOMMU.
2578 */ 2579 datasegcnt = dma_map_sg(&phba->pcidev->dev, 2580 scsi_sglist(scsi_cmnd), 2581 scsi_sg_count(scsi_cmnd), datadir); 2582 if (unlikely(!datasegcnt)) 2583 return 1; 2584 2585 lpfc_cmd->seg_cnt = datasegcnt; 2586 2587 /* First check if data segment count from SCSI Layer is good */ 2588 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 2589 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); 2590 ret = 2; 2591 goto err; 2592 } 2593 2594 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); 2595 2596 switch (prot_group_type) { 2597 case LPFC_PG_TYPE_NO_DIF: 2598 2599 /* Here we need to add a PDE5 and PDE6 to the count */ 2600 if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) { 2601 ret = 2; 2602 goto err; 2603 } 2604 2605 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl, 2606 datasegcnt); 2607 /* we should have 2 or more entries in buffer list */ 2608 if (num_bde < 2) { 2609 ret = 2; 2610 goto err; 2611 } 2612 break; 2613 2614 case LPFC_PG_TYPE_DIF_BUF: 2615 /* 2616 * This type indicates that protection buffers are 2617 * passed to the driver, so that needs to be prepared 2618 * for DMA 2619 */ 2620 protsegcnt = dma_map_sg(&phba->pcidev->dev, 2621 scsi_prot_sglist(scsi_cmnd), 2622 scsi_prot_sg_count(scsi_cmnd), datadir); 2623 if (unlikely(!protsegcnt)) { 2624 scsi_dma_unmap(scsi_cmnd); 2625 return 1; 2626 } 2627 2628 lpfc_cmd->prot_seg_cnt = protsegcnt; 2629 2630 /* 2631 * There is a minimun of 4 BPLs used for every 2632 * protection data segment. 2633 */ 2634 if ((lpfc_cmd->prot_seg_cnt * 4) > 2635 (phba->cfg_total_seg_cnt - 2)) { 2636 ret = 2; 2637 goto err; 2638 } 2639 2640 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl, 2641 datasegcnt, protsegcnt); 2642 /* we should have 3 or more entries in buffer list */ 2643 if ((num_bde < 3) || 2644 (num_bde > phba->cfg_total_seg_cnt)) { 2645 ret = 2; 2646 goto err; 2647 } 2648 break; 2649 2650 case LPFC_PG_TYPE_INVALID: 2651 default: 2652 scsi_dma_unmap(scsi_cmnd); 2653 lpfc_cmd->seg_cnt = 0; 2654 2655 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2656 "9022 Unexpected protection group %i\n", 2657 prot_group_type); 2658 return 2; 2659 } 2660 } 2661 2662 /* 2663 * Finish initializing those IOCB fields that are dependent on the 2664 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly 2665 * reinitialized since all iocb memory resources are used many times 2666 * for transmit, receive, and continuation bpl's. 
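 * The BPL descriptor below therefore covers the fcp_cmnd and fcp_rsp BDEs
 * plus the num_bde protection/data BDEs built above, i.e. bdeSize ends up
 * as (2 + num_bde) * sizeof(struct ulp_bde64).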
2667 */ 2668 iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); 2669 iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64)); 2670 iocb_cmd->ulpBdeCount = 1; 2671 iocb_cmd->ulpLe = 1; 2672 2673 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); 2674 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); 2675 2676 /* 2677 * Due to difference in data length between DIF/non-DIF paths, 2678 * we need to set word 4 of IOCB here 2679 */ 2680 iocb_cmd->un.fcpi.fcpi_parm = fcpdl; 2681 2682 /* 2683 * For First burst, we may need to adjust the initial transfer 2684 * length for DIF 2685 */ 2686 if (iocb_cmd->un.fcpi.fcpi_XRdy && 2687 (fcpdl < vport->cfg_first_burst_size)) 2688 iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl; 2689 2690 return 0; 2691 err: 2692 if (lpfc_cmd->seg_cnt) 2693 scsi_dma_unmap(scsi_cmnd); 2694 if (lpfc_cmd->prot_seg_cnt) 2695 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), 2696 scsi_prot_sg_count(scsi_cmnd), 2697 scsi_cmnd->sc_data_direction); 2698 2699 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2700 "9023 Cannot setup S/G List for HBA " 2701 "IO segs %d/%d BPL %d SCSI %d: %d %d\n", 2702 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, 2703 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, 2704 prot_group_type, num_bde); 2705 2706 lpfc_cmd->seg_cnt = 0; 2707 lpfc_cmd->prot_seg_cnt = 0; 2708 return ret; 2709 } 2710 2711 /* 2712 * This function calculates the T10 DIF guard tag 2713 * on the specified data using a CRC algorithm 2714 * via crc_t10dif. 2715 */ 2716 static uint16_t 2717 lpfc_bg_crc(uint8_t *data, int count) 2718 { 2719 uint16_t crc = 0; 2720 uint16_t x; 2721 2722 crc = crc_t10dif(data, count); 2723 x = cpu_to_be16(crc); 2724 return x; 2725 } 2726 2727 /* 2728 * This function calculates the T10 DIF guard tag 2729 * on the specified data using a CSUM algorithm 2730 * via ip_compute_csum. 2731 */ 2732 static uint16_t 2733 lpfc_bg_csum(uint8_t *data, int count) 2734 { 2735 uint16_t ret; 2736 2737 ret = ip_compute_csum(data, count); 2738 return ret; 2739 } 2740 2741 /* 2742 * This function examines the protection data to try to determine 2743 * what type of T10-DIF error occurred. 2744 */ 2745 static void 2746 lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) 2747 { 2748 struct scatterlist *sgpe; /* s/g prot entry */ 2749 struct scatterlist *sgde; /* s/g data entry */ 2750 struct scsi_cmnd *cmd = lpfc_cmd->pCmd; 2751 struct scsi_dif_tuple *src = NULL; 2752 uint8_t *data_src = NULL; 2753 uint16_t guard_tag; 2754 uint16_t start_app_tag, app_tag; 2755 uint32_t start_ref_tag, ref_tag; 2756 int prot, protsegcnt; 2757 int err_type, len, data_len; 2758 int chk_ref, chk_app, chk_guard; 2759 uint16_t sum; 2760 unsigned blksize; 2761 2762 err_type = BGS_GUARD_ERR_MASK; 2763 sum = 0; 2764 guard_tag = 0; 2765 2766 /* First check to see if there is protection data to examine */ 2767 prot = scsi_get_prot_op(cmd); 2768 if ((prot == SCSI_PROT_READ_STRIP) || 2769 (prot == SCSI_PROT_WRITE_INSERT) || 2770 (prot == SCSI_PROT_NORMAL)) 2771 goto out; 2772 2773 /* Currently the driver just supports ref_tag and guard_tag checking */ 2774 chk_ref = 1; 2775 chk_app = 0; 2776 chk_guard = 0; 2777 2778 /* Setup a ptr to the protection data provided by the SCSI host */ 2779 sgpe = scsi_prot_sglist(cmd); 2780 protsegcnt = lpfc_cmd->prot_seg_cnt; 2781 2782 if (sgpe && protsegcnt) { 2783 2784 /* 2785 * We will only try to verify the guard tag if the segment 2786 * data length is a multiple of the blksize.
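 * If a data segment does not cover whole protection intervals, the guard
 * value cannot be recomputed from that segment alone, so guard checking
 * is skipped for it while the reference tag check still runs.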
2787 */ 2788 sgde = scsi_sglist(cmd); 2789 blksize = scsi_prot_interval(cmd); 2790 data_src = (uint8_t *)sg_virt(sgde); 2791 data_len = sgde->length; 2792 if ((data_len & (blksize - 1)) == 0) 2793 chk_guard = 1; 2794 2795 src = (struct scsi_dif_tuple *)sg_virt(sgpe); 2796 start_ref_tag = scsi_prot_ref_tag(cmd); 2797 if (start_ref_tag == LPFC_INVALID_REFTAG) 2798 goto out; 2799 start_app_tag = src->app_tag; 2800 len = sgpe->length; 2801 while (src && protsegcnt) { 2802 while (len) { 2803 2804 /* 2805 * First check to see if a protection data 2806 * check is valid 2807 */ 2808 if ((src->ref_tag == T10_PI_REF_ESCAPE) || 2809 (src->app_tag == T10_PI_APP_ESCAPE)) { 2810 start_ref_tag++; 2811 goto skipit; 2812 } 2813 2814 /* First Guard Tag checking */ 2815 if (chk_guard) { 2816 guard_tag = src->guard_tag; 2817 if (cmd->prot_flags 2818 & SCSI_PROT_IP_CHECKSUM) 2819 sum = lpfc_bg_csum(data_src, 2820 blksize); 2821 else 2822 sum = lpfc_bg_crc(data_src, 2823 blksize); 2824 if ((guard_tag != sum)) { 2825 err_type = BGS_GUARD_ERR_MASK; 2826 goto out; 2827 } 2828 } 2829 2830 /* Reference Tag checking */ 2831 ref_tag = be32_to_cpu(src->ref_tag); 2832 if (chk_ref && (ref_tag != start_ref_tag)) { 2833 err_type = BGS_REFTAG_ERR_MASK; 2834 goto out; 2835 } 2836 start_ref_tag++; 2837 2838 /* App Tag checking */ 2839 app_tag = src->app_tag; 2840 if (chk_app && (app_tag != start_app_tag)) { 2841 err_type = BGS_APPTAG_ERR_MASK; 2842 goto out; 2843 } 2844 skipit: 2845 len -= sizeof(struct scsi_dif_tuple); 2846 if (len < 0) 2847 len = 0; 2848 src++; 2849 2850 data_src += blksize; 2851 data_len -= blksize; 2852 2853 /* 2854 * Are we at the end of the Data segment? 2855 * The data segment is only used for Guard 2856 * tag checking. 2857 */ 2858 if (chk_guard && (data_len == 0)) { 2859 chk_guard = 0; 2860 sgde = sg_next(sgde); 2861 if (!sgde) 2862 goto out; 2863 2864 data_src = (uint8_t *)sg_virt(sgde); 2865 data_len = sgde->length; 2866 if ((data_len & (blksize - 1)) == 0) 2867 chk_guard = 1; 2868 } 2869 } 2870 2871 /* Goto the next Protection data segment */ 2872 sgpe = sg_next(sgpe); 2873 if (sgpe) { 2874 src = (struct scsi_dif_tuple *)sg_virt(sgpe); 2875 len = sgpe->length; 2876 } else { 2877 src = NULL; 2878 } 2879 protsegcnt--; 2880 } 2881 } 2882 out: 2883 if (err_type == BGS_GUARD_ERR_MASK) { 2884 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1); 2885 set_host_byte(cmd, DID_ABORT); 2886 phba->bg_guard_err_cnt++; 2887 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2888 "9069 BLKGRD: reftag %x grd_tag err %x != %x\n", 2889 scsi_prot_ref_tag(cmd), 2890 sum, guard_tag); 2891 2892 } else if (err_type == BGS_REFTAG_ERR_MASK) { 2893 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3); 2894 set_host_byte(cmd, DID_ABORT); 2895 2896 phba->bg_reftag_err_cnt++; 2897 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2898 "9066 BLKGRD: reftag %x ref_tag err %x != %x\n", 2899 scsi_prot_ref_tag(cmd), 2900 ref_tag, start_ref_tag); 2901 2902 } else if (err_type == BGS_APPTAG_ERR_MASK) { 2903 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2); 2904 set_host_byte(cmd, DID_ABORT); 2905 2906 phba->bg_apptag_err_cnt++; 2907 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2908 "9041 BLKGRD: reftag %x app_tag err %x != %x\n", 2909 scsi_prot_ref_tag(cmd), 2910 app_tag, start_app_tag); 2911 } 2912 } 2913 2914 /* 2915 * This function checks for BlockGuard errors detected by 2916 * the HBA. 
In case of errors, the ASC/ASCQ fields in the 2917 * sense buffer will be set accordingly, paired with 2918 * ILLEGAL_REQUEST to signal to the kernel that the HBA 2919 * detected corruption. 2920 * 2921 * Returns: 2922 * 0 - No error found 2923 * 1 - BlockGuard error found 2924 * -1 - Internal error (bad profile, ...etc) 2925 */ 2926 static int 2927 lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, 2928 struct lpfc_wcqe_complete *wcqe) 2929 { 2930 struct scsi_cmnd *cmd = lpfc_cmd->pCmd; 2931 int ret = 0; 2932 u32 status = bf_get(lpfc_wcqe_c_status, wcqe); 2933 u32 bghm = 0; 2934 u32 bgstat = 0; 2935 u64 failing_sector = 0; 2936 2937 if (status == CQE_STATUS_DI_ERROR) { 2938 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */ 2939 bgstat |= BGS_GUARD_ERR_MASK; 2940 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* AppTag Check failed */ 2941 bgstat |= BGS_APPTAG_ERR_MASK; 2942 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* RefTag Check failed */ 2943 bgstat |= BGS_REFTAG_ERR_MASK; 2944 2945 /* Check to see if there was any good data before the error */ 2946 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) { 2947 bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK; 2948 bghm = wcqe->total_data_placed; 2949 } 2950 2951 /* 2952 * Set ALL the error bits to indicate we don't know what 2953 * type of error it is. 2954 */ 2955 if (!bgstat) 2956 bgstat |= (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK | 2957 BGS_GUARD_ERR_MASK); 2958 } 2959 2960 if (lpfc_bgs_get_guard_err(bgstat)) { 2961 ret = 1; 2962 2963 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1); 2964 set_host_byte(cmd, DID_ABORT); 2965 phba->bg_guard_err_cnt++; 2966 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2967 "9059 BLKGRD: Guard Tag error in cmd" 2968 " 0x%x lba 0x%llx blk cnt 0x%x " 2969 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 2970 (unsigned long long)scsi_get_lba(cmd), 2971 scsi_logical_block_count(cmd), bgstat, bghm); 2972 } 2973 2974 if (lpfc_bgs_get_reftag_err(bgstat)) { 2975 ret = 1; 2976 2977 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3); 2978 set_host_byte(cmd, DID_ABORT); 2979 2980 phba->bg_reftag_err_cnt++; 2981 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2982 "9060 BLKGRD: Ref Tag error in cmd" 2983 " 0x%x lba 0x%llx blk cnt 0x%x " 2984 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 2985 (unsigned long long)scsi_get_lba(cmd), 2986 scsi_logical_block_count(cmd), bgstat, bghm); 2987 } 2988 2989 if (lpfc_bgs_get_apptag_err(bgstat)) { 2990 ret = 1; 2991 2992 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2); 2993 set_host_byte(cmd, DID_ABORT); 2994 2995 phba->bg_apptag_err_cnt++; 2996 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2997 "9062 BLKGRD: App Tag error in cmd" 2998 " 0x%x lba 0x%llx blk cnt 0x%x " 2999 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3000 (unsigned long long)scsi_get_lba(cmd), 3001 scsi_logical_block_count(cmd), bgstat, bghm); 3002 } 3003 3004 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { 3005 /* 3006 * setup sense data descriptor 0 per SPC-4 as an information 3007 * field, and put the failing LBA in it. 3008 * This code assumes there was also a guard/app/ref tag error 3009 * indication. 
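 * For example (illustrative values): for SCSI_PROT_READ_INSERT with
 * 512-byte sectors, a bghm of 8192 bytes converts to 16 sectors, so the
 * LBA placed in the descriptor is the command's starting LBA plus 16.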
3010 */ 3011 cmd->sense_buffer[7] = 0xc; /* Additional sense length */ 3012 cmd->sense_buffer[8] = 0; /* Information descriptor type */ 3013 cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */ 3014 cmd->sense_buffer[10] = 0x80; /* Validity bit */ 3015 3016 /* bghm is a "on the wire" FC frame based count */ 3017 switch (scsi_get_prot_op(cmd)) { 3018 case SCSI_PROT_READ_INSERT: 3019 case SCSI_PROT_WRITE_STRIP: 3020 bghm /= cmd->device->sector_size; 3021 break; 3022 case SCSI_PROT_READ_STRIP: 3023 case SCSI_PROT_WRITE_INSERT: 3024 case SCSI_PROT_READ_PASS: 3025 case SCSI_PROT_WRITE_PASS: 3026 bghm /= (cmd->device->sector_size + 3027 sizeof(struct scsi_dif_tuple)); 3028 break; 3029 } 3030 3031 failing_sector = scsi_get_lba(cmd); 3032 failing_sector += bghm; 3033 3034 /* Descriptor Information */ 3035 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]); 3036 } 3037 3038 if (!ret) { 3039 /* No error was reported - problem in FW? */ 3040 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3041 "9068 BLKGRD: Unknown error in cmd" 3042 " 0x%x lba 0x%llx blk cnt 0x%x " 3043 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3044 (unsigned long long)scsi_get_lba(cmd), 3045 scsi_logical_block_count(cmd), bgstat, bghm); 3046 3047 /* Calculate what type of error it was */ 3048 lpfc_calc_bg_err(phba, lpfc_cmd); 3049 } 3050 return ret; 3051 } 3052 3053 /* 3054 * This function checks for BlockGuard errors detected by 3055 * the HBA. In case of errors, the ASC/ASCQ fields in the 3056 * sense buffer will be set accordingly, paired with 3057 * ILLEGAL_REQUEST to signal to the kernel that the HBA 3058 * detected corruption. 3059 * 3060 * Returns: 3061 * 0 - No error found 3062 * 1 - BlockGuard error found 3063 * -1 - Internal error (bad profile, ...etc) 3064 */ 3065 static int 3066 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, 3067 struct lpfc_iocbq *pIocbOut) 3068 { 3069 struct scsi_cmnd *cmd = lpfc_cmd->pCmd; 3070 struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg; 3071 int ret = 0; 3072 uint32_t bghm = bgf->bghm; 3073 uint32_t bgstat = bgf->bgstat; 3074 uint64_t failing_sector = 0; 3075 3076 if (lpfc_bgs_get_invalid_prof(bgstat)) { 3077 cmd->result = DID_ERROR << 16; 3078 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3079 "9072 BLKGRD: Invalid BG Profile in cmd " 3080 "0x%x reftag 0x%x blk cnt 0x%x " 3081 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3082 scsi_prot_ref_tag(cmd), 3083 scsi_logical_block_count(cmd), bgstat, bghm); 3084 ret = (-1); 3085 goto out; 3086 } 3087 3088 if (lpfc_bgs_get_uninit_dif_block(bgstat)) { 3089 cmd->result = DID_ERROR << 16; 3090 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3091 "9073 BLKGRD: Invalid BG PDIF Block in cmd " 3092 "0x%x reftag 0x%x blk cnt 0x%x " 3093 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3094 scsi_prot_ref_tag(cmd), 3095 scsi_logical_block_count(cmd), bgstat, bghm); 3096 ret = (-1); 3097 goto out; 3098 } 3099 3100 if (lpfc_bgs_get_guard_err(bgstat)) { 3101 ret = 1; 3102 3103 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1); 3104 set_host_byte(cmd, DID_ABORT); 3105 phba->bg_guard_err_cnt++; 3106 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3107 "9055 BLKGRD: Guard Tag error in cmd " 3108 "0x%x reftag 0x%x blk cnt 0x%x " 3109 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3110 scsi_prot_ref_tag(cmd), 3111 scsi_logical_block_count(cmd), bgstat, bghm); 3112 } 3113 3114 if (lpfc_bgs_get_reftag_err(bgstat)) { 3115 ret = 1; 3116 3117 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3); 3118 set_host_byte(cmd, 
DID_ABORT); 3119 3120 phba->bg_reftag_err_cnt++; 3121 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3122 "9056 BLKGRD: Ref Tag error in cmd " 3123 "0x%x reftag 0x%x blk cnt 0x%x " 3124 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3125 scsi_prot_ref_tag(cmd), 3126 scsi_logical_block_count(cmd), bgstat, bghm); 3127 } 3128 3129 if (lpfc_bgs_get_apptag_err(bgstat)) { 3130 ret = 1; 3131 3132 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2); 3133 set_host_byte(cmd, DID_ABORT); 3134 3135 phba->bg_apptag_err_cnt++; 3136 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3137 "9061 BLKGRD: App Tag error in cmd " 3138 "0x%x reftag 0x%x blk cnt 0x%x " 3139 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3140 scsi_prot_ref_tag(cmd), 3141 scsi_logical_block_count(cmd), bgstat, bghm); 3142 } 3143 3144 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { 3145 /* 3146 * setup sense data descriptor 0 per SPC-4 as an information 3147 * field, and put the failing LBA in it. 3148 * This code assumes there was also a guard/app/ref tag error 3149 * indication. 3150 */ 3151 cmd->sense_buffer[7] = 0xc; /* Additional sense length */ 3152 cmd->sense_buffer[8] = 0; /* Information descriptor type */ 3153 cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */ 3154 cmd->sense_buffer[10] = 0x80; /* Validity bit */ 3155 3156 /* bghm is a "on the wire" FC frame based count */ 3157 switch (scsi_get_prot_op(cmd)) { 3158 case SCSI_PROT_READ_INSERT: 3159 case SCSI_PROT_WRITE_STRIP: 3160 bghm /= cmd->device->sector_size; 3161 break; 3162 case SCSI_PROT_READ_STRIP: 3163 case SCSI_PROT_WRITE_INSERT: 3164 case SCSI_PROT_READ_PASS: 3165 case SCSI_PROT_WRITE_PASS: 3166 bghm /= (cmd->device->sector_size + 3167 sizeof(struct scsi_dif_tuple)); 3168 break; 3169 } 3170 3171 failing_sector = scsi_get_lba(cmd); 3172 failing_sector += bghm; 3173 3174 /* Descriptor Information */ 3175 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]); 3176 } 3177 3178 if (!ret) { 3179 /* No error was reported - problem in FW? */ 3180 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3181 "9057 BLKGRD: Unknown error in cmd " 3182 "0x%x reftag 0x%x blk cnt 0x%x " 3183 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3184 scsi_prot_ref_tag(cmd), 3185 scsi_logical_block_count(cmd), bgstat, bghm); 3186 3187 /* Calculate what type of error it was */ 3188 lpfc_calc_bg_err(phba, lpfc_cmd); 3189 } 3190 out: 3191 return ret; 3192 } 3193 3194 /** 3195 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec 3196 * @phba: The Hba for which this call is being executed. 3197 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3198 * 3199 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd 3200 * field of @lpfc_cmd for device with SLI-4 interface spec. 
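 * The routine builds data SGEs after the fcp_cmnd/fcp_rsp entries of the
 * command's SGL; when the mapped segment list would overflow the embedded
 * SGL, it chains to an extra per-hardware-queue SGL through an LSP-type SGE.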
3201 * 3202 * Return codes: 3203 * 2 - Error - Do not retry 3204 * 1 - Error - Retry 3205 * 0 - Success 3206 **/ 3207 static int 3208 lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) 3209 { 3210 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 3211 struct scatterlist *sgel = NULL; 3212 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 3213 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; 3214 struct sli4_sge *first_data_sgl; 3215 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; 3216 struct lpfc_vport *vport = phba->pport; 3217 union lpfc_wqe128 *wqe = &pwqeq->wqe; 3218 dma_addr_t physaddr; 3219 uint32_t num_bde = 0; 3220 uint32_t dma_len; 3221 uint32_t dma_offset = 0; 3222 int nseg, i, j; 3223 struct ulp_bde64 *bde; 3224 bool lsp_just_set = false; 3225 struct sli4_hybrid_sgl *sgl_xtra = NULL; 3226 3227 /* 3228 * There are three possibilities here - use scatter-gather segment, use 3229 * the single mapping, or neither. Start the lpfc command prep by 3230 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first 3231 * data bde entry. 3232 */ 3233 if (scsi_sg_count(scsi_cmnd)) { 3234 /* 3235 * The driver stores the segment count returned from pci_map_sg 3236 * because this a count of dma-mappings used to map the use_sg 3237 * pages. They are not guaranteed to be the same for those 3238 * architectures that implement an IOMMU. 3239 */ 3240 3241 nseg = scsi_dma_map(scsi_cmnd); 3242 if (unlikely(nseg <= 0)) 3243 return 1; 3244 sgl += 1; 3245 /* clear the last flag in the fcp_rsp map entry */ 3246 sgl->word2 = le32_to_cpu(sgl->word2); 3247 bf_set(lpfc_sli4_sge_last, sgl, 0); 3248 sgl->word2 = cpu_to_le32(sgl->word2); 3249 sgl += 1; 3250 first_data_sgl = sgl; 3251 lpfc_cmd->seg_cnt = nseg; 3252 if (!phba->cfg_xpsgl && 3253 lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 3254 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3255 "9074 BLKGRD:" 3256 " %s: Too many sg segments from " 3257 "dma_map_sg. Config %d, seg_cnt %d\n", 3258 __func__, phba->cfg_sg_seg_cnt, 3259 lpfc_cmd->seg_cnt); 3260 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); 3261 lpfc_cmd->seg_cnt = 0; 3262 scsi_dma_unmap(scsi_cmnd); 3263 return 2; 3264 } 3265 3266 /* 3267 * The driver established a maximum scatter-gather segment count 3268 * during probe that limits the number of sg elements in any 3269 * single scsi command. Just run through the seg_cnt and format 3270 * the sge's. 3271 * When using SLI-3 the driver will try to fit all the BDEs into 3272 * the IOCB. If it can't then the BDEs get added to a BPL as it 3273 * does for SLI-2 mode. 
3274 */ 3275 3276 /* for tracking segment boundaries */ 3277 sgel = scsi_sglist(scsi_cmnd); 3278 j = 2; 3279 for (i = 0; i < nseg; i++) { 3280 sgl->word2 = 0; 3281 if ((num_bde + 1) == nseg) { 3282 bf_set(lpfc_sli4_sge_last, sgl, 1); 3283 bf_set(lpfc_sli4_sge_type, sgl, 3284 LPFC_SGE_TYPE_DATA); 3285 } else { 3286 bf_set(lpfc_sli4_sge_last, sgl, 0); 3287 3288 /* do we need to expand the segment */ 3289 if (!lsp_just_set && 3290 !((j + 1) % phba->border_sge_num) && 3291 ((nseg - 1) != i)) { 3292 /* set LSP type */ 3293 bf_set(lpfc_sli4_sge_type, sgl, 3294 LPFC_SGE_TYPE_LSP); 3295 3296 sgl_xtra = lpfc_get_sgl_per_hdwq( 3297 phba, lpfc_cmd); 3298 3299 if (unlikely(!sgl_xtra)) { 3300 lpfc_cmd->seg_cnt = 0; 3301 scsi_dma_unmap(scsi_cmnd); 3302 return 1; 3303 } 3304 sgl->addr_lo = cpu_to_le32(putPaddrLow( 3305 sgl_xtra->dma_phys_sgl)); 3306 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 3307 sgl_xtra->dma_phys_sgl)); 3308 3309 } else { 3310 bf_set(lpfc_sli4_sge_type, sgl, 3311 LPFC_SGE_TYPE_DATA); 3312 } 3313 } 3314 3315 if (!(bf_get(lpfc_sli4_sge_type, sgl) & 3316 LPFC_SGE_TYPE_LSP)) { 3317 if ((nseg - 1) == i) 3318 bf_set(lpfc_sli4_sge_last, sgl, 1); 3319 3320 physaddr = sg_dma_address(sgel); 3321 dma_len = sg_dma_len(sgel); 3322 sgl->addr_lo = cpu_to_le32(putPaddrLow( 3323 physaddr)); 3324 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 3325 physaddr)); 3326 3327 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 3328 sgl->word2 = cpu_to_le32(sgl->word2); 3329 sgl->sge_len = cpu_to_le32(dma_len); 3330 3331 dma_offset += dma_len; 3332 sgel = sg_next(sgel); 3333 3334 sgl++; 3335 lsp_just_set = false; 3336 3337 } else { 3338 sgl->word2 = cpu_to_le32(sgl->word2); 3339 sgl->sge_len = cpu_to_le32( 3340 phba->cfg_sg_dma_buf_size); 3341 3342 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 3343 i = i - 1; 3344 3345 lsp_just_set = true; 3346 } 3347 3348 j++; 3349 } 3350 /* 3351 * Setup the first Payload BDE. For FCoE we just key off 3352 * Performance Hints, for FC we use lpfc_enable_pbde. 3353 * We populate words 13-15 of IOCB/WQE. 3354 */ 3355 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) || 3356 phba->cfg_enable_pbde) { 3357 bde = (struct ulp_bde64 *) 3358 &wqe->words[13]; 3359 bde->addrLow = first_data_sgl->addr_lo; 3360 bde->addrHigh = first_data_sgl->addr_hi; 3361 bde->tus.f.bdeSize = 3362 le32_to_cpu(first_data_sgl->sge_len); 3363 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 3364 bde->tus.w = cpu_to_le32(bde->tus.w); 3365 3366 } else { 3367 memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3)); 3368 } 3369 } else { 3370 sgl += 1; 3371 /* clear the last flag in the fcp_rsp map entry */ 3372 sgl->word2 = le32_to_cpu(sgl->word2); 3373 bf_set(lpfc_sli4_sge_last, sgl, 1); 3374 sgl->word2 = cpu_to_le32(sgl->word2); 3375 3376 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) || 3377 phba->cfg_enable_pbde) { 3378 bde = (struct ulp_bde64 *) 3379 &wqe->words[13]; 3380 memset(bde, 0, (sizeof(uint32_t) * 3)); 3381 } 3382 } 3383 3384 /* Word 11 */ 3385 if (phba->cfg_enable_pbde) 3386 bf_set(wqe_pbde, &wqe->generic.wqe_com, 1); 3387 3388 /* 3389 * Finish initializing those IOCB fields that are dependent on the 3390 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is 3391 * explicitly reinitialized. 3392 * all iocb memory resources are reused. 
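 * fcpDl below carries the total FCP data length in big-endian form; for
 * writes on native FC with first burst negotiated, WQE words 4 and 5 are
 * loaded with the initial transfer length (capped at the negotiated
 * first-burst size) and the total transfer length respectively.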
3393 */ 3394 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); 3395 /* Set first-burst provided it was successfully negotiated */ 3396 if (!(phba->hba_flag & HBA_FCOE_MODE) && 3397 vport->cfg_first_burst_size && 3398 scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) { 3399 u32 init_len, total_len; 3400 3401 total_len = be32_to_cpu(fcp_cmnd->fcpDl); 3402 init_len = min(total_len, vport->cfg_first_burst_size); 3403 3404 /* Word 4 & 5 */ 3405 wqe->fcp_iwrite.initial_xfer_len = init_len; 3406 wqe->fcp_iwrite.total_xfer_len = total_len; 3407 } else { 3408 /* Word 4 */ 3409 wqe->fcp_iwrite.total_xfer_len = 3410 be32_to_cpu(fcp_cmnd->fcpDl); 3411 } 3412 3413 /* 3414 * If the OAS driver feature is enabled and the lun is enabled for 3415 * OAS, set the oas iocb related flags. 3416 */ 3417 if ((phba->cfg_fof) && ((struct lpfc_device_data *) 3418 scsi_cmnd->device->hostdata)->oas_enabled) { 3419 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF); 3420 lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *) 3421 scsi_cmnd->device->hostdata)->priority; 3422 3423 /* Word 10 */ 3424 bf_set(wqe_oas, &wqe->generic.wqe_com, 1); 3425 bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1); 3426 3427 if (lpfc_cmd->cur_iocbq.priority) 3428 bf_set(wqe_ccp, &wqe->generic.wqe_com, 3429 (lpfc_cmd->cur_iocbq.priority << 1)); 3430 else 3431 bf_set(wqe_ccp, &wqe->generic.wqe_com, 3432 (phba->cfg_XLanePriority << 1)); 3433 } 3434 3435 return 0; 3436 } 3437 3438 /** 3439 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec 3440 * @phba: The Hba for which this call is being executed. 3441 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3442 * 3443 * This is the protection/DIF aware version of 3444 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the 3445 * two functions eventually, but for now, it's here 3446 * Return codes: 3447 * 2 - Error - Do not retry 3448 * 1 - Error - Retry 3449 * 0 - Success 3450 **/ 3451 static int 3452 lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, 3453 struct lpfc_io_buf *lpfc_cmd) 3454 { 3455 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 3456 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 3457 struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl); 3458 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; 3459 union lpfc_wqe128 *wqe = &pwqeq->wqe; 3460 uint32_t num_sge = 0; 3461 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; 3462 int prot_group_type = 0; 3463 int fcpdl; 3464 int ret = 1; 3465 struct lpfc_vport *vport = phba->pport; 3466 3467 /* 3468 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd 3469 * fcp_rsp regions to the first data sge entry 3470 */ 3471 if (scsi_sg_count(scsi_cmnd)) { 3472 /* 3473 * The driver stores the segment count returned from pci_map_sg 3474 * because this a count of dma-mappings used to map the use_sg 3475 * pages. They are not guaranteed to be the same for those 3476 * architectures that implement an IOMMU. 
3477 */ 3478 datasegcnt = dma_map_sg(&phba->pcidev->dev, 3479 scsi_sglist(scsi_cmnd), 3480 scsi_sg_count(scsi_cmnd), datadir); 3481 if (unlikely(!datasegcnt)) 3482 return 1; 3483 3484 sgl += 1; 3485 /* clear the last flag in the fcp_rsp map entry */ 3486 sgl->word2 = le32_to_cpu(sgl->word2); 3487 bf_set(lpfc_sli4_sge_last, sgl, 0); 3488 sgl->word2 = cpu_to_le32(sgl->word2); 3489 3490 sgl += 1; 3491 lpfc_cmd->seg_cnt = datasegcnt; 3492 3493 /* First check if data segment count from SCSI Layer is good */ 3494 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt && 3495 !phba->cfg_xpsgl) { 3496 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); 3497 ret = 2; 3498 goto err; 3499 } 3500 3501 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); 3502 3503 switch (prot_group_type) { 3504 case LPFC_PG_TYPE_NO_DIF: 3505 /* Here we need to add a DISEED to the count */ 3506 if (((lpfc_cmd->seg_cnt + 1) > 3507 phba->cfg_total_seg_cnt) && 3508 !phba->cfg_xpsgl) { 3509 ret = 2; 3510 goto err; 3511 } 3512 3513 num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl, 3514 datasegcnt, lpfc_cmd); 3515 3516 /* we should have 2 or more entries in buffer list */ 3517 if (num_sge < 2) { 3518 ret = 2; 3519 goto err; 3520 } 3521 break; 3522 3523 case LPFC_PG_TYPE_DIF_BUF: 3524 /* 3525 * This type indicates that protection buffers are 3526 * passed to the driver, so that needs to be prepared 3527 * for DMA 3528 */ 3529 protsegcnt = dma_map_sg(&phba->pcidev->dev, 3530 scsi_prot_sglist(scsi_cmnd), 3531 scsi_prot_sg_count(scsi_cmnd), datadir); 3532 if (unlikely(!protsegcnt)) { 3533 scsi_dma_unmap(scsi_cmnd); 3534 return 1; 3535 } 3536 3537 lpfc_cmd->prot_seg_cnt = protsegcnt; 3538 /* 3539 * There is a minimun of 3 SGEs used for every 3540 * protection data segment. 3541 */ 3542 if (((lpfc_cmd->prot_seg_cnt * 3) > 3543 (phba->cfg_total_seg_cnt - 2)) && 3544 !phba->cfg_xpsgl) { 3545 ret = 2; 3546 goto err; 3547 } 3548 3549 num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl, 3550 datasegcnt, protsegcnt, lpfc_cmd); 3551 3552 /* we should have 3 or more entries in buffer list */ 3553 if (num_sge < 3 || 3554 (num_sge > phba->cfg_total_seg_cnt && 3555 !phba->cfg_xpsgl)) { 3556 ret = 2; 3557 goto err; 3558 } 3559 break; 3560 3561 case LPFC_PG_TYPE_INVALID: 3562 default: 3563 scsi_dma_unmap(scsi_cmnd); 3564 lpfc_cmd->seg_cnt = 0; 3565 3566 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3567 "9083 Unexpected protection group %i\n", 3568 prot_group_type); 3569 return 2; 3570 } 3571 } 3572 3573 switch (scsi_get_prot_op(scsi_cmnd)) { 3574 case SCSI_PROT_WRITE_STRIP: 3575 case SCSI_PROT_READ_STRIP: 3576 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP; 3577 break; 3578 case SCSI_PROT_WRITE_INSERT: 3579 case SCSI_PROT_READ_INSERT: 3580 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT; 3581 break; 3582 case SCSI_PROT_WRITE_PASS: 3583 case SCSI_PROT_READ_PASS: 3584 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS; 3585 break; 3586 } 3587 3588 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); 3589 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); 3590 3591 /* Set first-burst provided it was successfully negotiated */ 3592 if (!(phba->hba_flag & HBA_FCOE_MODE) && 3593 vport->cfg_first_burst_size && 3594 scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) { 3595 u32 init_len, total_len; 3596 3597 total_len = be32_to_cpu(fcp_cmnd->fcpDl); 3598 init_len = min(total_len, vport->cfg_first_burst_size); 3599 3600 /* Word 4 & 5 */ 3601 wqe->fcp_iwrite.initial_xfer_len = init_len; 3602 wqe->fcp_iwrite.total_xfer_len = total_len; 3603 } else { 3604 /* Word 4 
*/ 3605 wqe->fcp_iwrite.total_xfer_len = 3606 be32_to_cpu(fcp_cmnd->fcpDl); 3607 } 3608 3609 /* 3610 * If the OAS driver feature is enabled and the lun is enabled for 3611 * OAS, set the oas iocb related flags. 3612 */ 3613 if ((phba->cfg_fof) && ((struct lpfc_device_data *) 3614 scsi_cmnd->device->hostdata)->oas_enabled) { 3615 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF); 3616 3617 /* Word 10 */ 3618 bf_set(wqe_oas, &wqe->generic.wqe_com, 1); 3619 bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1); 3620 bf_set(wqe_ccp, &wqe->generic.wqe_com, 3621 (phba->cfg_XLanePriority << 1)); 3622 } 3623 3624 /* Word 7. DIF Flags */ 3625 if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_PASS) 3626 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU); 3627 else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_STRIP) 3628 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP); 3629 else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_INSERT) 3630 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT); 3631 3632 lpfc_cmd->cur_iocbq.iocb_flag &= ~(LPFC_IO_DIF_PASS | 3633 LPFC_IO_DIF_STRIP | LPFC_IO_DIF_INSERT); 3634 3635 return 0; 3636 err: 3637 if (lpfc_cmd->seg_cnt) 3638 scsi_dma_unmap(scsi_cmnd); 3639 if (lpfc_cmd->prot_seg_cnt) 3640 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), 3641 scsi_prot_sg_count(scsi_cmnd), 3642 scsi_cmnd->sc_data_direction); 3643 3644 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3645 "9084 Cannot setup S/G List for HBA" 3646 "IO segs %d/%d SGL %d SCSI %d: %d %d\n", 3647 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, 3648 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, 3649 prot_group_type, num_sge); 3650 3651 lpfc_cmd->seg_cnt = 0; 3652 lpfc_cmd->prot_seg_cnt = 0; 3653 return ret; 3654 } 3655 3656 /** 3657 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer 3658 * @phba: The Hba for which this call is being executed. 3659 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3660 * 3661 * This routine wraps the actual DMA mapping function pointer from the 3662 * lpfc_hba struct. 3663 * 3664 * Return codes: 3665 * 1 - Error 3666 * 0 - Success 3667 **/ 3668 static inline int 3669 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) 3670 { 3671 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); 3672 } 3673 3674 /** 3675 * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer 3676 * using BlockGuard. 3677 * @phba: The Hba for which this call is being executed. 3678 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3679 * 3680 * This routine wraps the actual DMA mapping function pointer from the 3681 * lpfc_hba struct. 3682 * 3683 * Return codes: 3684 * 1 - Error 3685 * 0 - Success 3686 **/ 3687 static inline int 3688 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) 3689 { 3690 return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 3691 } 3692 3693 /** 3694 * lpfc_scsi_prep_cmnd_buf - Wrapper function for IOCB/WQE mapping of scsi 3695 * buffer 3696 * @vport: Pointer to vport object. 3697 * @lpfc_cmd: The scsi buffer which is going to be mapped. 
3698 * @tmo: Timeout value for IO 3699 * 3700 * This routine initializes IOCB/WQE data structure from scsi command 3701 * 3702 * Return codes: 3703 * 1 - Error 3704 * 0 - Success 3705 **/ 3706 static inline int 3707 lpfc_scsi_prep_cmnd_buf(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, 3708 uint8_t tmo) 3709 { 3710 return vport->phba->lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, tmo); 3711 } 3712 3713 /** 3714 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error 3715 * @phba: Pointer to hba context object. 3716 * @vport: Pointer to vport object. 3717 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error. 3718 * @fcpi_parm: FCP Initiator parameter. 3719 * 3720 * This function posts an event when there is a SCSI command reporting 3721 * error from the scsi device. 3722 **/ 3723 static void 3724 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport, 3725 struct lpfc_io_buf *lpfc_cmd, uint32_t fcpi_parm) { 3726 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; 3727 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; 3728 uint32_t resp_info = fcprsp->rspStatus2; 3729 uint32_t scsi_status = fcprsp->rspStatus3; 3730 struct lpfc_fast_path_event *fast_path_evt = NULL; 3731 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode; 3732 unsigned long flags; 3733 3734 if (!pnode) 3735 return; 3736 3737 /* If there is queuefull or busy condition send a scsi event */ 3738 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) || 3739 (cmnd->result == SAM_STAT_BUSY)) { 3740 fast_path_evt = lpfc_alloc_fast_evt(phba); 3741 if (!fast_path_evt) 3742 return; 3743 fast_path_evt->un.scsi_evt.event_type = 3744 FC_REG_SCSI_EVENT; 3745 fast_path_evt->un.scsi_evt.subcategory = 3746 (cmnd->result == SAM_STAT_TASK_SET_FULL) ? 3747 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY; 3748 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun; 3749 memcpy(&fast_path_evt->un.scsi_evt.wwpn, 3750 &pnode->nlp_portname, sizeof(struct lpfc_name)); 3751 memcpy(&fast_path_evt->un.scsi_evt.wwnn, 3752 &pnode->nlp_nodename, sizeof(struct lpfc_name)); 3753 } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen && 3754 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) { 3755 fast_path_evt = lpfc_alloc_fast_evt(phba); 3756 if (!fast_path_evt) 3757 return; 3758 fast_path_evt->un.check_cond_evt.scsi_event.event_type = 3759 FC_REG_SCSI_EVENT; 3760 fast_path_evt->un.check_cond_evt.scsi_event.subcategory = 3761 LPFC_EVENT_CHECK_COND; 3762 fast_path_evt->un.check_cond_evt.scsi_event.lun = 3763 cmnd->device->lun; 3764 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn, 3765 &pnode->nlp_portname, sizeof(struct lpfc_name)); 3766 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn, 3767 &pnode->nlp_nodename, sizeof(struct lpfc_name)); 3768 fast_path_evt->un.check_cond_evt.sense_key = 3769 cmnd->sense_buffer[2] & 0xf; 3770 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12]; 3771 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13]; 3772 } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) && 3773 fcpi_parm && 3774 ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) || 3775 ((scsi_status == SAM_STAT_GOOD) && 3776 !(resp_info & (RESID_UNDER | RESID_OVER))))) { 3777 /* 3778 * If status is good or resid does not match with fcp_param and 3779 * there is valid fcpi_parm, then there is a read_check error 3780 */ 3781 fast_path_evt = lpfc_alloc_fast_evt(phba); 3782 if (!fast_path_evt) 3783 return; 3784 fast_path_evt->un.read_check_error.header.event_type = 3785 FC_REG_FABRIC_EVENT; 3786 
fast_path_evt->un.read_check_error.header.subcategory = 3787 LPFC_EVENT_FCPRDCHKERR; 3788 memcpy(&fast_path_evt->un.read_check_error.header.wwpn, 3789 &pnode->nlp_portname, sizeof(struct lpfc_name)); 3790 memcpy(&fast_path_evt->un.read_check_error.header.wwnn, 3791 &pnode->nlp_nodename, sizeof(struct lpfc_name)); 3792 fast_path_evt->un.read_check_error.lun = cmnd->device->lun; 3793 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0]; 3794 fast_path_evt->un.read_check_error.fcpiparam = 3795 fcpi_parm; 3796 } else 3797 return; 3798 3799 fast_path_evt->vport = vport; 3800 spin_lock_irqsave(&phba->hbalock, flags); 3801 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list); 3802 spin_unlock_irqrestore(&phba->hbalock, flags); 3803 lpfc_worker_wake_up(phba); 3804 return; 3805 } 3806 3807 /** 3808 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev 3809 * @phba: The HBA for which this call is being executed. 3810 * @psb: The scsi buffer which is going to be un-mapped. 3811 * 3812 * This routine does DMA un-mapping of scatter gather list of scsi command 3813 * field of @lpfc_cmd for device with SLI-3 interface spec. 3814 **/ 3815 static void 3816 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb) 3817 { 3818 /* 3819 * There are only two special cases to consider. (1) the scsi command 3820 * requested scatter-gather usage or (2) the scsi command allocated 3821 * a request buffer, but did not request use_sg. There is a third 3822 * case, but it does not require resource deallocation. 3823 */ 3824 if (psb->seg_cnt > 0) 3825 scsi_dma_unmap(psb->pCmd); 3826 if (psb->prot_seg_cnt > 0) 3827 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd), 3828 scsi_prot_sg_count(psb->pCmd), 3829 psb->pCmd->sc_data_direction); 3830 } 3831 3832 /** 3833 * lpfc_unblock_requests - allow further commands to be queued. 3834 * @phba: pointer to phba object 3835 * 3836 * For single vport, just call scsi_unblock_requests on physical port. 3837 * For multiple vports, send scsi_unblock_requests for all the vports. 3838 */ 3839 void 3840 lpfc_unblock_requests(struct lpfc_hba *phba) 3841 { 3842 struct lpfc_vport **vports; 3843 struct Scsi_Host *shost; 3844 int i; 3845 3846 if (phba->sli_rev == LPFC_SLI_REV4 && 3847 !phba->sli4_hba.max_cfg_param.vpi_used) { 3848 shost = lpfc_shost_from_vport(phba->pport); 3849 scsi_unblock_requests(shost); 3850 return; 3851 } 3852 3853 vports = lpfc_create_vport_work_array(phba); 3854 if (vports != NULL) 3855 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3856 shost = lpfc_shost_from_vport(vports[i]); 3857 scsi_unblock_requests(shost); 3858 } 3859 lpfc_destroy_vport_work_array(phba, vports); 3860 } 3861 3862 /** 3863 * lpfc_block_requests - prevent further commands from being queued. 3864 * @phba: pointer to phba object 3865 * 3866 * For single vport, just call scsi_block_requests on physical port. 3867 * For multiple vports, send scsi_block_requests for all the vports. 
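 * If congestion management has already stopped I/O (cmf_stop_io is set),
 * the routine returns without blocking any queues.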
3868 */ 3869 void 3870 lpfc_block_requests(struct lpfc_hba *phba) 3871 { 3872 struct lpfc_vport **vports; 3873 struct Scsi_Host *shost; 3874 int i; 3875 3876 if (atomic_read(&phba->cmf_stop_io)) 3877 return; 3878 3879 if (phba->sli_rev == LPFC_SLI_REV4 && 3880 !phba->sli4_hba.max_cfg_param.vpi_used) { 3881 shost = lpfc_shost_from_vport(phba->pport); 3882 scsi_block_requests(shost); 3883 return; 3884 } 3885 3886 vports = lpfc_create_vport_work_array(phba); 3887 if (vports != NULL) 3888 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3889 shost = lpfc_shost_from_vport(vports[i]); 3890 scsi_block_requests(shost); 3891 } 3892 lpfc_destroy_vport_work_array(phba, vports); 3893 } 3894 3895 /** 3896 * lpfc_update_cmf_cmpl - Adjust CMF counters for IO completion 3897 * @phba: The HBA for which this call is being executed. 3898 * @time: The latency of the IO that completed (in ns) 3899 * @size: The size of the IO that completed 3900 * @shost: SCSI host the IO completed on (NULL for a NVME IO) 3901 * 3902 * The routine adjusts the various Burst and Bandwidth counters used in 3903 * Congestion management and E2E. If time is set to LPFC_CGN_NOT_SENT, 3904 * that means the IO was never issued to the HBA, so this routine is 3905 * just being called to cleanup the counter from a previous 3906 * lpfc_update_cmf_cmd call. 3907 */ 3908 int 3909 lpfc_update_cmf_cmpl(struct lpfc_hba *phba, 3910 uint64_t time, uint32_t size, struct Scsi_Host *shost) 3911 { 3912 struct lpfc_cgn_stat *cgs; 3913 3914 if (time != LPFC_CGN_NOT_SENT) { 3915 /* lat is ns coming in, save latency in us */ 3916 if (time < 1000) 3917 time = 1; 3918 else 3919 time = div_u64(time + 500, 1000); /* round it */ 3920 3921 cgs = this_cpu_ptr(phba->cmf_stat); 3922 atomic64_add(size, &cgs->rcv_bytes); 3923 atomic64_add(time, &cgs->rx_latency); 3924 atomic_inc(&cgs->rx_io_cnt); 3925 } 3926 return 0; 3927 } 3928 3929 /** 3930 * lpfc_update_cmf_cmd - Adjust CMF counters for IO submission 3931 * @phba: The HBA for which this call is being executed. 3932 * @size: The size of the IO that will be issued 3933 * 3934 * The routine adjusts the various Burst and Bandwidth counters used in 3935 * Congestion management and E2E. 3936 */ 3937 int 3938 lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t size) 3939 { 3940 uint64_t total; 3941 struct lpfc_cgn_stat *cgs; 3942 int cpu; 3943 3944 /* At this point we are either LPFC_CFG_MANAGED or LPFC_CFG_MONITOR */ 3945 if (phba->cmf_active_mode == LPFC_CFG_MANAGED) { 3946 total = 0; 3947 for_each_present_cpu(cpu) { 3948 cgs = per_cpu_ptr(phba->cmf_stat, cpu); 3949 total += atomic64_read(&cgs->total_bytes); 3950 } 3951 if (total >= phba->cmf_max_bytes_per_interval) { 3952 if (!atomic_xchg(&phba->cmf_bw_wait, 1)) { 3953 lpfc_block_requests(phba); 3954 phba->cmf_last_ts = 3955 lpfc_calc_cmf_latency(phba); 3956 } 3957 atomic_inc(&phba->cmf_busy); 3958 return -EBUSY; 3959 } 3960 if (size > atomic_read(&phba->rx_max_read_cnt)) 3961 atomic_set(&phba->rx_max_read_cnt, size); 3962 } 3963 3964 cgs = this_cpu_ptr(phba->cmf_stat); 3965 atomic64_add(size, &cgs->total_bytes); 3966 return 0; 3967 } 3968 3969 /** 3970 * lpfc_handle_fcp_err - FCP response handler 3971 * @vport: The virtual port for which this call is being executed. 3972 * @lpfc_cmd: Pointer to lpfc_io_buf data structure. 3973 * @fcpi_parm: FCP Initiator parameter. 3974 * 3975 * This routine is called to process response IOCB with status field 3976 * IOSTAT_FCP_RSP_ERROR. 
This routine sets result field of scsi command 3977 * based upon SCSI and FCP error. 3978 **/ 3979 static void 3980 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, 3981 uint32_t fcpi_parm) 3982 { 3983 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; 3984 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd; 3985 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; 3986 uint32_t resp_info = fcprsp->rspStatus2; 3987 uint32_t scsi_status = fcprsp->rspStatus3; 3988 uint32_t *lp; 3989 uint32_t host_status = DID_OK; 3990 uint32_t rsplen = 0; 3991 uint32_t fcpDl; 3992 uint32_t logit = LOG_FCP | LOG_FCP_ERROR; 3993 3994 3995 /* 3996 * If this is a task management command, there is no 3997 * scsi packet associated with this lpfc_cmd. The driver 3998 * consumes it. 3999 */ 4000 if (fcpcmd->fcpCntl2) { 4001 scsi_status = 0; 4002 goto out; 4003 } 4004 4005 if (resp_info & RSP_LEN_VALID) { 4006 rsplen = be32_to_cpu(fcprsp->rspRspLen); 4007 if (rsplen != 0 && rsplen != 4 && rsplen != 8) { 4008 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4009 "2719 Invalid response length: " 4010 "tgt x%x lun x%llx cmnd x%x rsplen " 4011 "x%x\n", cmnd->device->id, 4012 cmnd->device->lun, cmnd->cmnd[0], 4013 rsplen); 4014 host_status = DID_ERROR; 4015 goto out; 4016 } 4017 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) { 4018 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4019 "2757 Protocol failure detected during " 4020 "processing of FCP I/O op: " 4021 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n", 4022 cmnd->device->id, 4023 cmnd->device->lun, cmnd->cmnd[0], 4024 fcprsp->rspInfo3); 4025 host_status = DID_ERROR; 4026 goto out; 4027 } 4028 } 4029 4030 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) { 4031 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen); 4032 if (snslen > SCSI_SENSE_BUFFERSIZE) 4033 snslen = SCSI_SENSE_BUFFERSIZE; 4034 4035 if (resp_info & RSP_LEN_VALID) 4036 rsplen = be32_to_cpu(fcprsp->rspRspLen); 4037 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen); 4038 } 4039 lp = (uint32_t *)cmnd->sense_buffer; 4040 4041 /* special handling for under run conditions */ 4042 if (!scsi_status && (resp_info & RESID_UNDER)) { 4043 /* don't log under runs if fcp set... */ 4044 if (vport->cfg_log_verbose & LOG_FCP) 4045 logit = LOG_FCP_ERROR; 4046 /* unless operator says so */ 4047 if (vport->cfg_log_verbose & LOG_FCP_UNDER) 4048 logit = LOG_FCP_UNDER; 4049 } 4050 4051 lpfc_printf_vlog(vport, KERN_WARNING, logit, 4052 "9024 FCP command x%x failed: x%x SNS x%x x%x " 4053 "Data: x%x x%x x%x x%x x%x\n", 4054 cmnd->cmnd[0], scsi_status, 4055 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info, 4056 be32_to_cpu(fcprsp->rspResId), 4057 be32_to_cpu(fcprsp->rspSnsLen), 4058 be32_to_cpu(fcprsp->rspRspLen), 4059 fcprsp->rspInfo3); 4060 4061 scsi_set_resid(cmnd, 0); 4062 fcpDl = be32_to_cpu(fcpcmd->fcpDl); 4063 if (resp_info & RESID_UNDER) { 4064 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId)); 4065 4066 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER, 4067 "9025 FCP Underrun, expected %d, " 4068 "residual %d Data: x%x x%x x%x\n", 4069 fcpDl, 4070 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0], 4071 cmnd->underflow); 4072 4073 /* 4074 * If there is an under run, check if under run reported by 4075 * storage array is same as the under run reported by HBA. 4076 * If this is not same, there is a dropped frame. 
4077 */ 4078 if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) { 4079 lpfc_printf_vlog(vport, KERN_WARNING, 4080 LOG_FCP | LOG_FCP_ERROR, 4081 "9026 FCP Read Check Error " 4082 "and Underrun Data: x%x x%x x%x x%x\n", 4083 fcpDl, 4084 scsi_get_resid(cmnd), fcpi_parm, 4085 cmnd->cmnd[0]); 4086 scsi_set_resid(cmnd, scsi_bufflen(cmnd)); 4087 host_status = DID_ERROR; 4088 } 4089 /* 4090 * The cmnd->underflow is the minimum number of bytes that must 4091 * be transferred for this command. Provided a sense condition 4092 * is not present, make sure the actual amount transferred is at 4093 * least the underflow value or fail. 4094 */ 4095 if (!(resp_info & SNS_LEN_VALID) && 4096 (scsi_status == SAM_STAT_GOOD) && 4097 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) 4098 < cmnd->underflow)) { 4099 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 4100 "9027 FCP command x%x residual " 4101 "underrun converted to error " 4102 "Data: x%x x%x x%x\n", 4103 cmnd->cmnd[0], scsi_bufflen(cmnd), 4104 scsi_get_resid(cmnd), cmnd->underflow); 4105 host_status = DID_ERROR; 4106 } 4107 } else if (resp_info & RESID_OVER) { 4108 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 4109 "9028 FCP command x%x residual overrun error. " 4110 "Data: x%x x%x\n", cmnd->cmnd[0], 4111 scsi_bufflen(cmnd), scsi_get_resid(cmnd)); 4112 host_status = DID_ERROR; 4113 4114 /* 4115 * Check SLI validation that all the transfer was actually done 4116 * (fcpi_parm should be zero). Apply check only to reads. 4117 */ 4118 } else if (fcpi_parm) { 4119 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, 4120 "9029 FCP %s Check Error Data: " 4121 "x%x x%x x%x x%x x%x\n", 4122 ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ? 4123 "Read" : "Write"), 4124 fcpDl, be32_to_cpu(fcprsp->rspResId), 4125 fcpi_parm, cmnd->cmnd[0], scsi_status); 4126 4127 /* There is some issue with the LPe12000 that causes it 4128 * to miscalculate the fcpi_parm and falsely trip this 4129 * recovery logic. Detect this case and don't error when true. 4130 */ 4131 if (fcpi_parm > fcpDl) 4132 goto out; 4133 4134 switch (scsi_status) { 4135 case SAM_STAT_GOOD: 4136 case SAM_STAT_CHECK_CONDITION: 4137 /* Fabric dropped a data frame. Fail any successful 4138 * command in which we detected dropped frames. 4139 * A status of good or some check conditions could 4140 * be considered a successful command. 4141 */ 4142 host_status = DID_ERROR; 4143 break; 4144 } 4145 scsi_set_resid(cmnd, scsi_bufflen(cmnd)); 4146 } 4147 4148 out: 4149 cmnd->result = host_status << 16 | scsi_status; 4150 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, fcpi_parm); 4151 } 4152 4153 /** 4154 * lpfc_fcp_io_cmd_wqe_cmpl - Complete a FCP IO 4155 * @phba: The hba for which this call is being executed. 4156 * @pwqeIn: The command WQE for the scsi cmnd. 4157 * @wcqe: Pointer to driver response CQE object. 4158 * 4159 * This routine assigns scsi command result by looking into response WQE 4160 * status field appropriately. This routine handles QUEUE FULL condition as 4161 * well by ramping down device queue depth. 
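 * The routine also restores any protection data that was modified for
 * debugfs error injection, updates CMF completion statistics for reads,
 * and releases the lpfc_io_buf once processing is complete.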
4162 **/ 4163 static void 4164 lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, 4165 struct lpfc_wcqe_complete *wcqe) 4166 { 4167 struct lpfc_io_buf *lpfc_cmd = 4168 (struct lpfc_io_buf *)pwqeIn->context1; 4169 struct lpfc_vport *vport = pwqeIn->vport; 4170 struct lpfc_rport_data *rdata; 4171 struct lpfc_nodelist *ndlp; 4172 struct scsi_cmnd *cmd; 4173 unsigned long flags; 4174 struct lpfc_fast_path_event *fast_path_evt; 4175 struct Scsi_Host *shost; 4176 u32 logit = LOG_FCP; 4177 u32 status, idx; 4178 unsigned long iflags = 0; 4179 u32 lat; 4180 u8 wait_xb_clr = 0; 4181 4182 /* Sanity check on return of outstanding command */ 4183 if (!lpfc_cmd) { 4184 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4185 "9032 Null lpfc_cmd pointer. No " 4186 "release, skip completion\n"); 4187 return; 4188 } 4189 4190 rdata = lpfc_cmd->rdata; 4191 ndlp = rdata->pnode; 4192 4193 if (bf_get(lpfc_wcqe_c_xb, wcqe)) { 4194 /* TOREMOVE - currently this flag is checked during 4195 * the release of lpfc_iocbq. Remove once we move 4196 * to lpfc_wqe_job construct. 4197 * 4198 * This needs to be done outside buf_lock 4199 */ 4200 spin_lock_irqsave(&phba->hbalock, iflags); 4201 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_EXCHANGE_BUSY; 4202 spin_unlock_irqrestore(&phba->hbalock, iflags); 4203 } 4204 4205 /* Guard against abort handler being called at same time */ 4206 spin_lock(&lpfc_cmd->buf_lock); 4207 4208 /* Sanity check on return of outstanding command */ 4209 cmd = lpfc_cmd->pCmd; 4210 if (!cmd) { 4211 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4212 "9042 I/O completion: Not an active IO\n"); 4213 spin_unlock(&lpfc_cmd->buf_lock); 4214 lpfc_release_scsi_buf(phba, lpfc_cmd); 4215 return; 4216 } 4217 idx = lpfc_cmd->cur_iocbq.hba_wqidx; 4218 if (phba->sli4_hba.hdwq) 4219 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++; 4220 4221 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4222 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) 4223 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); 4224 #endif 4225 shost = cmd->device->host; 4226 4227 status = bf_get(lpfc_wcqe_c_status, wcqe); 4228 lpfc_cmd->status = (status & LPFC_IOCB_STATUS_MASK); 4229 lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK); 4230 4231 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; 4232 if (bf_get(lpfc_wcqe_c_xb, wcqe)) { 4233 lpfc_cmd->flags |= LPFC_SBUF_XBUSY; 4234 if (phba->cfg_fcp_wait_abts_rsp) 4235 wait_xb_clr = 1; 4236 } 4237 4238 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4239 if (lpfc_cmd->prot_data_type) { 4240 struct scsi_dif_tuple *src = NULL; 4241 4242 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment; 4243 /* 4244 * Used to restore any changes to protection 4245 * data for error injection. 
4246 */ 4247 switch (lpfc_cmd->prot_data_type) { 4248 case LPFC_INJERR_REFTAG: 4249 src->ref_tag = 4250 lpfc_cmd->prot_data; 4251 break; 4252 case LPFC_INJERR_APPTAG: 4253 src->app_tag = 4254 (uint16_t)lpfc_cmd->prot_data; 4255 break; 4256 case LPFC_INJERR_GUARD: 4257 src->guard_tag = 4258 (uint16_t)lpfc_cmd->prot_data; 4259 break; 4260 default: 4261 break; 4262 } 4263 4264 lpfc_cmd->prot_data = 0; 4265 lpfc_cmd->prot_data_type = 0; 4266 lpfc_cmd->prot_data_segment = NULL; 4267 } 4268 #endif 4269 if (unlikely(lpfc_cmd->status)) { 4270 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && 4271 (lpfc_cmd->result & IOERR_DRVR_MASK)) 4272 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 4273 else if (lpfc_cmd->status >= IOSTAT_CNT) 4274 lpfc_cmd->status = IOSTAT_DEFAULT; 4275 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR && 4276 !lpfc_cmd->fcp_rsp->rspStatus3 && 4277 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) && 4278 !(vport->cfg_log_verbose & LOG_FCP_UNDER)) 4279 logit = 0; 4280 else 4281 logit = LOG_FCP | LOG_FCP_UNDER; 4282 lpfc_printf_vlog(vport, KERN_WARNING, logit, 4283 "9034 FCP cmd x%x failed <%d/%lld> " 4284 "status: x%x result: x%x " 4285 "sid: x%x did: x%x oxid: x%x " 4286 "Data: x%x x%x x%x\n", 4287 cmd->cmnd[0], 4288 cmd->device ? cmd->device->id : 0xffff, 4289 cmd->device ? cmd->device->lun : 0xffff, 4290 lpfc_cmd->status, lpfc_cmd->result, 4291 vport->fc_myDID, 4292 (ndlp) ? ndlp->nlp_DID : 0, 4293 lpfc_cmd->cur_iocbq.sli4_xritag, 4294 wcqe->parameter, wcqe->total_data_placed, 4295 lpfc_cmd->cur_iocbq.iotag); 4296 } 4297 4298 switch (lpfc_cmd->status) { 4299 case IOSTAT_SUCCESS: 4300 cmd->result = DID_OK << 16; 4301 break; 4302 case IOSTAT_FCP_RSP_ERROR: 4303 lpfc_handle_fcp_err(vport, lpfc_cmd, 4304 pwqeIn->wqe.fcp_iread.total_xfer_len - 4305 wcqe->total_data_placed); 4306 break; 4307 case IOSTAT_NPORT_BSY: 4308 case IOSTAT_FABRIC_BSY: 4309 cmd->result = DID_TRANSPORT_DISRUPTED << 16; 4310 fast_path_evt = lpfc_alloc_fast_evt(phba); 4311 if (!fast_path_evt) 4312 break; 4313 fast_path_evt->un.fabric_evt.event_type = 4314 FC_REG_FABRIC_EVENT; 4315 fast_path_evt->un.fabric_evt.subcategory = 4316 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ? 4317 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY; 4318 if (ndlp) { 4319 memcpy(&fast_path_evt->un.fabric_evt.wwpn, 4320 &ndlp->nlp_portname, 4321 sizeof(struct lpfc_name)); 4322 memcpy(&fast_path_evt->un.fabric_evt.wwnn, 4323 &ndlp->nlp_nodename, 4324 sizeof(struct lpfc_name)); 4325 } 4326 fast_path_evt->vport = vport; 4327 fast_path_evt->work_evt.evt = 4328 LPFC_EVT_FASTPATH_MGMT_EVT; 4329 spin_lock_irqsave(&phba->hbalock, flags); 4330 list_add_tail(&fast_path_evt->work_evt.evt_listp, 4331 &phba->work_list); 4332 spin_unlock_irqrestore(&phba->hbalock, flags); 4333 lpfc_worker_wake_up(phba); 4334 lpfc_printf_vlog(vport, KERN_WARNING, logit, 4335 "9035 Fabric/Node busy FCP cmd x%x failed" 4336 " <%d/%lld> " 4337 "status: x%x result: x%x " 4338 "sid: x%x did: x%x oxid: x%x " 4339 "Data: x%x x%x x%x\n", 4340 cmd->cmnd[0], 4341 cmd->device ? cmd->device->id : 0xffff, 4342 cmd->device ? cmd->device->lun : 0xffff, 4343 lpfc_cmd->status, lpfc_cmd->result, 4344 vport->fc_myDID, 4345 (ndlp) ? ndlp->nlp_DID : 0, 4346 lpfc_cmd->cur_iocbq.sli4_xritag, 4347 wcqe->parameter, 4348 wcqe->total_data_placed, 4349 lpfc_cmd->cur_iocbq.iocb.ulpIoTag); 4350 break; 4351 case IOSTAT_REMOTE_STOP: 4352 if (ndlp) { 4353 /* This I/O was aborted by the target, we don't 4354 * know the rxid and because we did not send the 4355 * ABTS we cannot generate and RRQ. 
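			 * Instead, mark the RRQ active for this XRI so the
			 * driver does not reuse the exchange prematurely.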
4356 */ 4357 lpfc_set_rrq_active(phba, ndlp, 4358 lpfc_cmd->cur_iocbq.sli4_lxritag, 4359 0, 0); 4360 } 4361 fallthrough; 4362 case IOSTAT_LOCAL_REJECT: 4363 if (lpfc_cmd->result & IOERR_DRVR_MASK) 4364 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 4365 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR || 4366 lpfc_cmd->result == 4367 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR || 4368 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR || 4369 lpfc_cmd->result == 4370 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) { 4371 cmd->result = DID_NO_CONNECT << 16; 4372 break; 4373 } 4374 if (lpfc_cmd->result == IOERR_INVALID_RPI || 4375 lpfc_cmd->result == IOERR_NO_RESOURCES || 4376 lpfc_cmd->result == IOERR_ABORT_REQUESTED || 4377 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) { 4378 cmd->result = DID_REQUEUE << 16; 4379 break; 4380 } 4381 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED || 4382 lpfc_cmd->result == IOERR_TX_DMA_FAILED) && 4383 status == CQE_STATUS_DI_ERROR) { 4384 if (scsi_get_prot_op(cmd) != 4385 SCSI_PROT_NORMAL) { 4386 /* 4387 * This is a response for a BG enabled 4388 * cmd. Parse BG error 4389 */ 4390 lpfc_sli4_parse_bg_err(phba, lpfc_cmd, 4391 wcqe); 4392 break; 4393 } 4394 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 4395 "9040 non-zero BGSTAT on unprotected cmd\n"); 4396 } 4397 lpfc_printf_vlog(vport, KERN_WARNING, logit, 4398 "9036 Local Reject FCP cmd x%x failed" 4399 " <%d/%lld> " 4400 "status: x%x result: x%x " 4401 "sid: x%x did: x%x oxid: x%x " 4402 "Data: x%x x%x x%x\n", 4403 cmd->cmnd[0], 4404 cmd->device ? cmd->device->id : 0xffff, 4405 cmd->device ? cmd->device->lun : 0xffff, 4406 lpfc_cmd->status, lpfc_cmd->result, 4407 vport->fc_myDID, 4408 (ndlp) ? ndlp->nlp_DID : 0, 4409 lpfc_cmd->cur_iocbq.sli4_xritag, 4410 wcqe->parameter, 4411 wcqe->total_data_placed, 4412 lpfc_cmd->cur_iocbq.iocb.ulpIoTag); 4413 fallthrough; 4414 default: 4415 if (lpfc_cmd->status >= IOSTAT_CNT) 4416 lpfc_cmd->status = IOSTAT_DEFAULT; 4417 cmd->result = DID_ERROR << 16; 4418 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 4419 "9037 FCP Completion Error: xri %x " 4420 "status x%x result x%x [x%x] " 4421 "placed x%x\n", 4422 lpfc_cmd->cur_iocbq.sli4_xritag, 4423 lpfc_cmd->status, lpfc_cmd->result, 4424 wcqe->parameter, 4425 wcqe->total_data_placed); 4426 } 4427 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) { 4428 u32 *lp = (u32 *)cmd->sense_buffer; 4429 4430 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 4431 "9039 Iodone <%d/%llu> cmd x%px, error " 4432 "x%x SNS x%x x%x Data: x%x x%x\n", 4433 cmd->device->id, cmd->device->lun, cmd, 4434 cmd->result, *lp, *(lp + 3), cmd->retries, 4435 scsi_get_resid(cmd)); 4436 } 4437 4438 lpfc_update_stats(vport, lpfc_cmd); 4439 4440 if (vport->cfg_max_scsicmpl_time && 4441 time_after(jiffies, lpfc_cmd->start_time + 4442 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { 4443 spin_lock_irqsave(shost->host_lock, flags); 4444 if (ndlp) { 4445 if (ndlp->cmd_qdepth > 4446 atomic_read(&ndlp->cmd_pending) && 4447 (atomic_read(&ndlp->cmd_pending) > 4448 LPFC_MIN_TGT_QDEPTH) && 4449 (cmd->cmnd[0] == READ_10 || 4450 cmd->cmnd[0] == WRITE_10)) 4451 ndlp->cmd_qdepth = 4452 atomic_read(&ndlp->cmd_pending); 4453 4454 ndlp->last_change_time = jiffies; 4455 } 4456 spin_unlock_irqrestore(shost->host_lock, flags); 4457 } 4458 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 4459 4460 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4461 if (lpfc_cmd->ts_cmd_start) { 4462 lpfc_cmd->ts_isr_cmpl = lpfc_cmd->cur_iocbq.isr_timestamp; 4463 lpfc_cmd->ts_data_io = ktime_get_ns(); 4464 phba->ktime_last_cmd = 
lpfc_cmd->ts_data_io; 4465 lpfc_io_ktime(phba, lpfc_cmd); 4466 } 4467 #endif 4468 if (likely(!wait_xb_clr)) 4469 lpfc_cmd->pCmd = NULL; 4470 spin_unlock(&lpfc_cmd->buf_lock); 4471 4472 /* Check if IO qualified for CMF */ 4473 if (phba->cmf_active_mode != LPFC_CFG_OFF && 4474 cmd->sc_data_direction == DMA_FROM_DEVICE && 4475 (scsi_sg_count(cmd))) { 4476 /* Used when calculating average latency */ 4477 lat = ktime_get_ns() - lpfc_cmd->rx_cmd_start; 4478 lpfc_update_cmf_cmpl(phba, lat, scsi_bufflen(cmd), shost); 4479 } 4480 4481 if (wait_xb_clr) 4482 goto out; 4483 4484 /* The sdev is not guaranteed to be valid post scsi_done upcall. */ 4485 cmd->scsi_done(cmd); 4486 4487 /* 4488 * If there is an abort thread waiting for command completion 4489 * wake up the thread. 4490 */ 4491 spin_lock(&lpfc_cmd->buf_lock); 4492 lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED; 4493 if (lpfc_cmd->waitq) 4494 wake_up(lpfc_cmd->waitq); 4495 spin_unlock(&lpfc_cmd->buf_lock); 4496 out: 4497 lpfc_release_scsi_buf(phba, lpfc_cmd); 4498 } 4499 4500 /** 4501 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine 4502 * @phba: The Hba for which this call is being executed. 4503 * @pIocbIn: The command IOCBQ for the scsi cmnd. 4504 * @pIocbOut: The response IOCBQ for the scsi cmnd. 4505 * 4506 * This routine assigns scsi command result by looking into response IOCB 4507 * status field appropriately. This routine handles QUEUE FULL condition as 4508 * well by ramping down device queue depth. 4509 **/ 4510 static void 4511 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, 4512 struct lpfc_iocbq *pIocbOut) 4513 { 4514 struct lpfc_io_buf *lpfc_cmd = 4515 (struct lpfc_io_buf *) pIocbIn->context1; 4516 struct lpfc_vport *vport = pIocbIn->vport; 4517 struct lpfc_rport_data *rdata = lpfc_cmd->rdata; 4518 struct lpfc_nodelist *pnode = rdata->pnode; 4519 struct scsi_cmnd *cmd; 4520 unsigned long flags; 4521 struct lpfc_fast_path_event *fast_path_evt; 4522 struct Scsi_Host *shost; 4523 int idx; 4524 uint32_t logit = LOG_FCP; 4525 4526 /* Guard against abort handler being called at same time */ 4527 spin_lock(&lpfc_cmd->buf_lock); 4528 4529 /* Sanity check on return of outstanding command */ 4530 cmd = lpfc_cmd->pCmd; 4531 if (!cmd || !phba) { 4532 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4533 "2621 IO completion: Not an active IO\n"); 4534 spin_unlock(&lpfc_cmd->buf_lock); 4535 return; 4536 } 4537 4538 idx = lpfc_cmd->cur_iocbq.hba_wqidx; 4539 if (phba->sli4_hba.hdwq) 4540 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++; 4541 4542 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4543 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) 4544 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); 4545 #endif 4546 shost = cmd->device->host; 4547 4548 lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK); 4549 lpfc_cmd->status = pIocbOut->iocb.ulpStatus; 4550 /* pick up SLI4 exchange busy status from HBA */ 4551 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; 4552 if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY) 4553 lpfc_cmd->flags |= LPFC_SBUF_XBUSY; 4554 4555 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4556 if (lpfc_cmd->prot_data_type) { 4557 struct scsi_dif_tuple *src = NULL; 4558 4559 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment; 4560 /* 4561 * Used to restore any changes to protection 4562 * data for error injection. 
4563 */ 4564 switch (lpfc_cmd->prot_data_type) { 4565 case LPFC_INJERR_REFTAG: 4566 src->ref_tag = 4567 lpfc_cmd->prot_data; 4568 break; 4569 case LPFC_INJERR_APPTAG: 4570 src->app_tag = 4571 (uint16_t)lpfc_cmd->prot_data; 4572 break; 4573 case LPFC_INJERR_GUARD: 4574 src->guard_tag = 4575 (uint16_t)lpfc_cmd->prot_data; 4576 break; 4577 default: 4578 break; 4579 } 4580 4581 lpfc_cmd->prot_data = 0; 4582 lpfc_cmd->prot_data_type = 0; 4583 lpfc_cmd->prot_data_segment = NULL; 4584 } 4585 #endif 4586 4587 if (unlikely(lpfc_cmd->status)) { 4588 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && 4589 (lpfc_cmd->result & IOERR_DRVR_MASK)) 4590 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 4591 else if (lpfc_cmd->status >= IOSTAT_CNT) 4592 lpfc_cmd->status = IOSTAT_DEFAULT; 4593 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR && 4594 !lpfc_cmd->fcp_rsp->rspStatus3 && 4595 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) && 4596 !(vport->cfg_log_verbose & LOG_FCP_UNDER)) 4597 logit = 0; 4598 else 4599 logit = LOG_FCP | LOG_FCP_UNDER; 4600 lpfc_printf_vlog(vport, KERN_WARNING, logit, 4601 "9030 FCP cmd x%x failed <%d/%lld> " 4602 "status: x%x result: x%x " 4603 "sid: x%x did: x%x oxid: x%x " 4604 "Data: x%x x%x\n", 4605 cmd->cmnd[0], 4606 cmd->device ? cmd->device->id : 0xffff, 4607 cmd->device ? cmd->device->lun : 0xffff, 4608 lpfc_cmd->status, lpfc_cmd->result, 4609 vport->fc_myDID, 4610 (pnode) ? pnode->nlp_DID : 0, 4611 phba->sli_rev == LPFC_SLI_REV4 ? 4612 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff, 4613 pIocbOut->iocb.ulpContext, 4614 lpfc_cmd->cur_iocbq.iocb.ulpIoTag); 4615 4616 switch (lpfc_cmd->status) { 4617 case IOSTAT_FCP_RSP_ERROR: 4618 /* Call FCP RSP handler to determine result */ 4619 lpfc_handle_fcp_err(vport, lpfc_cmd, 4620 pIocbOut->iocb.un.fcpi.fcpi_parm); 4621 break; 4622 case IOSTAT_NPORT_BSY: 4623 case IOSTAT_FABRIC_BSY: 4624 cmd->result = DID_TRANSPORT_DISRUPTED << 16; 4625 fast_path_evt = lpfc_alloc_fast_evt(phba); 4626 if (!fast_path_evt) 4627 break; 4628 fast_path_evt->un.fabric_evt.event_type = 4629 FC_REG_FABRIC_EVENT; 4630 fast_path_evt->un.fabric_evt.subcategory = 4631 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ? 
4632 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY; 4633 if (pnode) { 4634 memcpy(&fast_path_evt->un.fabric_evt.wwpn, 4635 &pnode->nlp_portname, 4636 sizeof(struct lpfc_name)); 4637 memcpy(&fast_path_evt->un.fabric_evt.wwnn, 4638 &pnode->nlp_nodename, 4639 sizeof(struct lpfc_name)); 4640 } 4641 fast_path_evt->vport = vport; 4642 fast_path_evt->work_evt.evt = 4643 LPFC_EVT_FASTPATH_MGMT_EVT; 4644 spin_lock_irqsave(&phba->hbalock, flags); 4645 list_add_tail(&fast_path_evt->work_evt.evt_listp, 4646 &phba->work_list); 4647 spin_unlock_irqrestore(&phba->hbalock, flags); 4648 lpfc_worker_wake_up(phba); 4649 break; 4650 case IOSTAT_LOCAL_REJECT: 4651 case IOSTAT_REMOTE_STOP: 4652 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR || 4653 lpfc_cmd->result == 4654 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR || 4655 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR || 4656 lpfc_cmd->result == 4657 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) { 4658 cmd->result = DID_NO_CONNECT << 16; 4659 break; 4660 } 4661 if (lpfc_cmd->result == IOERR_INVALID_RPI || 4662 lpfc_cmd->result == IOERR_NO_RESOURCES || 4663 lpfc_cmd->result == IOERR_ABORT_REQUESTED || 4664 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) { 4665 cmd->result = DID_REQUEUE << 16; 4666 break; 4667 } 4668 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED || 4669 lpfc_cmd->result == IOERR_TX_DMA_FAILED) && 4670 pIocbOut->iocb.unsli3.sli3_bg.bgstat) { 4671 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { 4672 /* 4673 * This is a response for a BG enabled 4674 * cmd. Parse BG error 4675 */ 4676 lpfc_parse_bg_err(phba, lpfc_cmd, 4677 pIocbOut); 4678 break; 4679 } else { 4680 lpfc_printf_vlog(vport, KERN_WARNING, 4681 LOG_BG, 4682 "9031 non-zero BGSTAT " 4683 "on unprotected cmd\n"); 4684 } 4685 } 4686 if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP) 4687 && (phba->sli_rev == LPFC_SLI_REV4) 4688 && pnode) { 4689 /* This IO was aborted by the target, we don't 4690 * know the rxid and because we did not send the 4691 * ABTS we cannot generate and RRQ. 
4692 */ 4693 lpfc_set_rrq_active(phba, pnode, 4694 lpfc_cmd->cur_iocbq.sli4_lxritag, 4695 0, 0); 4696 } 4697 fallthrough; 4698 default: 4699 cmd->result = DID_ERROR << 16; 4700 break; 4701 } 4702 4703 if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) 4704 cmd->result = DID_TRANSPORT_DISRUPTED << 16 | 4705 SAM_STAT_BUSY; 4706 } else 4707 cmd->result = DID_OK << 16; 4708 4709 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) { 4710 uint32_t *lp = (uint32_t *)cmd->sense_buffer; 4711 4712 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 4713 "0710 Iodone <%d/%llu> cmd x%px, error " 4714 "x%x SNS x%x x%x Data: x%x x%x\n", 4715 cmd->device->id, cmd->device->lun, cmd, 4716 cmd->result, *lp, *(lp + 3), cmd->retries, 4717 scsi_get_resid(cmd)); 4718 } 4719 4720 lpfc_update_stats(vport, lpfc_cmd); 4721 if (vport->cfg_max_scsicmpl_time && 4722 time_after(jiffies, lpfc_cmd->start_time + 4723 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { 4724 spin_lock_irqsave(shost->host_lock, flags); 4725 if (pnode) { 4726 if (pnode->cmd_qdepth > 4727 atomic_read(&pnode->cmd_pending) && 4728 (atomic_read(&pnode->cmd_pending) > 4729 LPFC_MIN_TGT_QDEPTH) && 4730 ((cmd->cmnd[0] == READ_10) || 4731 (cmd->cmnd[0] == WRITE_10))) 4732 pnode->cmd_qdepth = 4733 atomic_read(&pnode->cmd_pending); 4734 4735 pnode->last_change_time = jiffies; 4736 } 4737 spin_unlock_irqrestore(shost->host_lock, flags); 4738 } 4739 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 4740 4741 lpfc_cmd->pCmd = NULL; 4742 spin_unlock(&lpfc_cmd->buf_lock); 4743 4744 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4745 if (lpfc_cmd->ts_cmd_start) { 4746 lpfc_cmd->ts_isr_cmpl = pIocbIn->isr_timestamp; 4747 lpfc_cmd->ts_data_io = ktime_get_ns(); 4748 phba->ktime_last_cmd = lpfc_cmd->ts_data_io; 4749 lpfc_io_ktime(phba, lpfc_cmd); 4750 } 4751 #endif 4752 4753 /* The sdev is not guaranteed to be valid post scsi_done upcall. */ 4754 cmd->scsi_done(cmd); 4755 4756 /* 4757 * If there is an abort thread waiting for command completion 4758 * wake up the thread. 4759 */ 4760 spin_lock(&lpfc_cmd->buf_lock); 4761 lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED; 4762 if (lpfc_cmd->waitq) 4763 wake_up(lpfc_cmd->waitq); 4764 spin_unlock(&lpfc_cmd->buf_lock); 4765 4766 lpfc_release_scsi_buf(phba, lpfc_cmd); 4767 } 4768 4769 /** 4770 * lpfc_scsi_prep_cmnd_buf_s3 - SLI-3 IOCB init for the IO 4771 * @vport: Pointer to vport object. 4772 * @lpfc_cmd: The scsi buffer which is going to be prep'ed. 4773 * @tmo: timeout value for the IO 4774 * 4775 * Based on the data-direction of the command, initialize IOCB 4776 * in the I/O buffer. Fill in the IOCB fields which are independent 4777 * of the scsi buffer 4778 * 4779 * RETURNS 0 - SUCCESS, 4780 **/ 4781 static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport, 4782 struct lpfc_io_buf *lpfc_cmd, 4783 uint8_t tmo) 4784 { 4785 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 4786 struct lpfc_iocbq *piocbq = &lpfc_cmd->cur_iocbq; 4787 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 4788 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 4789 struct lpfc_nodelist *pnode = lpfc_cmd->ndlp; 4790 int datadir = scsi_cmnd->sc_data_direction; 4791 u32 fcpdl; 4792 4793 piocbq->iocb.un.fcpi.fcpi_XRdy = 0; 4794 4795 /* 4796 * There are three possibilities here - use scatter-gather segment, use 4797 * the single mapping, or neither. Start the lpfc command prep by 4798 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first 4799 * data bde entry. 
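	 * A command with no data transfer falls through to the ICMND64 case
	 * below with ulpPU and fcpCntl3 cleared.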
4800 */ 4801 if (scsi_sg_count(scsi_cmnd)) { 4802 if (datadir == DMA_TO_DEVICE) { 4803 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; 4804 iocb_cmd->ulpPU = PARM_READ_CHECK; 4805 if (vport->cfg_first_burst_size && 4806 (pnode->nlp_flag & NLP_FIRSTBURST)) { 4807 u32 xrdy_len; 4808 4809 fcpdl = scsi_bufflen(scsi_cmnd); 4810 xrdy_len = min(fcpdl, 4811 vport->cfg_first_burst_size); 4812 piocbq->iocb.un.fcpi.fcpi_XRdy = xrdy_len; 4813 } 4814 fcp_cmnd->fcpCntl3 = WRITE_DATA; 4815 } else { 4816 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; 4817 iocb_cmd->ulpPU = PARM_READ_CHECK; 4818 fcp_cmnd->fcpCntl3 = READ_DATA; 4819 } 4820 } else { 4821 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR; 4822 iocb_cmd->un.fcpi.fcpi_parm = 0; 4823 iocb_cmd->ulpPU = 0; 4824 fcp_cmnd->fcpCntl3 = 0; 4825 } 4826 4827 /* 4828 * Finish initializing those IOCB fields that are independent 4829 * of the scsi_cmnd request_buffer 4830 */ 4831 piocbq->iocb.ulpContext = pnode->nlp_rpi; 4832 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) 4833 piocbq->iocb.ulpFCP2Rcvy = 1; 4834 else 4835 piocbq->iocb.ulpFCP2Rcvy = 0; 4836 4837 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f); 4838 piocbq->context1 = lpfc_cmd; 4839 if (!piocbq->iocb_cmpl) 4840 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl; 4841 piocbq->iocb.ulpTimeout = tmo; 4842 piocbq->vport = vport; 4843 return 0; 4844 } 4845 4846 /** 4847 * lpfc_scsi_prep_cmnd_buf_s4 - SLI-4 WQE init for the IO 4848 * @vport: Pointer to vport object. 4849 * @lpfc_cmd: The scsi buffer which is going to be prep'ed. 4850 * @tmo: timeout value for the IO 4851 * 4852 * Based on the data-direction of the command copy WQE template 4853 * to I/O buffer WQE. Fill in the WQE fields which are independent 4854 * of the scsi buffer 4855 * 4856 * RETURNS 0 - SUCCESS, 4857 **/ 4858 static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport, 4859 struct lpfc_io_buf *lpfc_cmd, 4860 uint8_t tmo) 4861 { 4862 struct lpfc_hba *phba = vport->phba; 4863 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 4864 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 4865 struct lpfc_sli4_hdw_queue *hdwq = NULL; 4866 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; 4867 struct lpfc_nodelist *pnode = lpfc_cmd->ndlp; 4868 union lpfc_wqe128 *wqe = &pwqeq->wqe; 4869 u16 idx = lpfc_cmd->hdwq_no; 4870 int datadir = scsi_cmnd->sc_data_direction; 4871 4872 hdwq = &phba->sli4_hba.hdwq[idx]; 4873 4874 /* Initialize 64 bytes only */ 4875 memset(wqe, 0, sizeof(union lpfc_wqe128)); 4876 4877 /* 4878 * There are three possibilities here - use scatter-gather segment, use 4879 * the single mapping, or neither. 
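	 * The direction specific WQE words are copied from the matching
	 * iwrite/iread/icmnd command template below.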
 */
	if (scsi_sg_count(scsi_cmnd)) {
		if (datadir == DMA_TO_DEVICE) {
			/* From the iwrite template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iwrite_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			if (hdwq)
				hdwq->scsi_cstat.output_requests++;
		} else {
			/* From the iread template, initialize words 7 - 11 */
			memcpy(&wqe->words[7],
			       &lpfc_iread_cmd_template.words[7],
			       sizeof(uint32_t) * 5);

			/* Word 7 */
			bf_set(wqe_tmo, &wqe->fcp_iread.wqe_com, tmo);

			fcp_cmnd->fcpCntl3 = READ_DATA;
			if (hdwq)
				hdwq->scsi_cstat.input_requests++;

			/* For a CMF Managed port, iod must be zero'ed */
			if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
				bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
				       LPFC_WQE_IOD_NONE);
		}
	} else {
		/* From the icmnd template, initialize words 4 - 11 */
		memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
		       sizeof(uint32_t) * 8);

		/* Word 7 */
		bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, tmo);

		fcp_cmnd->fcpCntl3 = 0;
		if (hdwq)
			hdwq->scsi_cstat.control_requests++;
	}

	/*
	 * Finish initializing those WQE fields that are independent
	 * of the request_buffer
	 */

	/* Word 3 */
	bf_set(payload_offset_len, &wqe->fcp_icmd,
	       sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
	       phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);

	/* Word 7 */
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		bf_set(wqe_erp, &wqe->generic.wqe_com, 1);

	bf_set(wqe_class, &wqe->generic.wqe_com,
	       (pnode->nlp_fcp_info & 0x0f));

	/* Word 8 */
	wqe->generic.wqe_com.abort_tag = pwqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);

	pwqeq->vport = vport;
	pwqeq->context1 = lpfc_cmd;
	pwqeq->hba_wqidx = lpfc_cmd->hdwq_no;
	pwqeq->wqe_cmpl = lpfc_fcp_io_cmd_wqe_cmpl;

	return 0;
}

/**
 * lpfc_scsi_prep_cmnd - Wrapper func to convert a scsi cmnd to an FCP info unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: The scsi command which needs to be sent.
 * @pnode: Pointer to lpfc_nodelist.
 *
 * This routine initializes the fcp_cmnd and iocb data structures from the
 * scsi command for transfer to a device with the SLI-3 interface spec.
4966 **/ 4967 static int 4968 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, 4969 struct lpfc_nodelist *pnode) 4970 { 4971 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 4972 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 4973 u8 *ptr; 4974 4975 if (!pnode) 4976 return 0; 4977 4978 lpfc_cmd->fcp_rsp->rspSnsLen = 0; 4979 /* clear task management bits */ 4980 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0; 4981 4982 int_to_scsilun(lpfc_cmd->pCmd->device->lun, 4983 &lpfc_cmd->fcp_cmnd->fcp_lun); 4984 4985 ptr = &fcp_cmnd->fcpCdb[0]; 4986 memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len); 4987 if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) { 4988 ptr += scsi_cmnd->cmd_len; 4989 memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len)); 4990 } 4991 4992 fcp_cmnd->fcpCntl1 = SIMPLE_Q; 4993 4994 lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, lpfc_cmd->timeout); 4995 4996 return 0; 4997 } 4998 4999 /** 5000 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit 5001 * @vport: The virtual port for which this call is being executed. 5002 * @lpfc_cmd: Pointer to lpfc_io_buf data structure. 5003 * @lun: Logical unit number. 5004 * @task_mgmt_cmd: SCSI task management command. 5005 * 5006 * This routine creates FCP information unit corresponding to @task_mgmt_cmd 5007 * for device with SLI-3 interface spec. 5008 * 5009 * Return codes: 5010 * 0 - Error 5011 * 1 - Success 5012 **/ 5013 static int 5014 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, 5015 struct lpfc_io_buf *lpfc_cmd, 5016 uint64_t lun, 5017 uint8_t task_mgmt_cmd) 5018 { 5019 struct lpfc_iocbq *piocbq; 5020 IOCB_t *piocb; 5021 struct fcp_cmnd *fcp_cmnd; 5022 struct lpfc_rport_data *rdata = lpfc_cmd->rdata; 5023 struct lpfc_nodelist *ndlp = rdata->pnode; 5024 5025 if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE) 5026 return 0; 5027 5028 piocbq = &(lpfc_cmd->cur_iocbq); 5029 piocbq->vport = vport; 5030 5031 piocb = &piocbq->iocb; 5032 5033 fcp_cmnd = lpfc_cmd->fcp_cmnd; 5034 /* Clear out any old data in the FCP command area */ 5035 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); 5036 int_to_scsilun(lun, &fcp_cmnd->fcp_lun); 5037 fcp_cmnd->fcpCntl2 = task_mgmt_cmd; 5038 if (vport->phba->sli_rev == 3 && 5039 !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED)) 5040 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd); 5041 piocb->ulpCommand = CMD_FCP_ICMND64_CR; 5042 piocb->ulpContext = ndlp->nlp_rpi; 5043 if (vport->phba->sli_rev == LPFC_SLI_REV4) { 5044 piocb->ulpContext = 5045 vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 5046 } 5047 piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0; 5048 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f); 5049 piocb->ulpPU = 0; 5050 piocb->un.fcpi.fcpi_parm = 0; 5051 5052 /* ulpTimeout is only one byte */ 5053 if (lpfc_cmd->timeout > 0xff) { 5054 /* 5055 * Do not timeout the command at the firmware level. 5056 * The driver will provide the timeout mechanism. 5057 */ 5058 piocb->ulpTimeout = 0; 5059 } else 5060 piocb->ulpTimeout = lpfc_cmd->timeout; 5061 5062 if (vport->phba->sli_rev == LPFC_SLI_REV4) 5063 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd); 5064 5065 return 1; 5066 } 5067 5068 /** 5069 * lpfc_scsi_api_table_setup - Set up scsi api function jump table 5070 * @phba: The hba struct for which this call is being executed. 5071 * @dev_grp: The HBA PCI-Device group number. 5072 * 5073 * This routine sets up the SCSI interface API function jump table in @phba 5074 * struct. 5075 * Returns: 0 - success, -ENODEV - failure. 
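 * LPFC_PCI_DEV_LP adapters are wired to the SLI-3 (_s3) routines and
 * LPFC_PCI_DEV_OC adapters to the SLI-4 (_s4) routines; the queue depth
 * rampdown and IOCB completion handlers are common to both.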
5076 **/ 5077 int 5078 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 5079 { 5080 5081 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf; 5082 5083 switch (dev_grp) { 5084 case LPFC_PCI_DEV_LP: 5085 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3; 5086 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3; 5087 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3; 5088 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3; 5089 phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s3; 5090 break; 5091 case LPFC_PCI_DEV_OC: 5092 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4; 5093 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4; 5094 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4; 5095 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4; 5096 phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4; 5097 break; 5098 default: 5099 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5100 "1418 Invalid HBA PCI-device group: 0x%x\n", 5101 dev_grp); 5102 return -ENODEV; 5103 } 5104 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth; 5105 phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl; 5106 return 0; 5107 } 5108 5109 /** 5110 * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command 5111 * @phba: The Hba for which this call is being executed. 5112 * @cmdiocbq: Pointer to lpfc_iocbq data structure. 5113 * @rspiocbq: Pointer to lpfc_iocbq data structure. 5114 * 5115 * This routine is IOCB completion routine for device reset and target reset 5116 * routine. This routine release scsi buffer associated with lpfc_cmd. 5117 **/ 5118 static void 5119 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba, 5120 struct lpfc_iocbq *cmdiocbq, 5121 struct lpfc_iocbq *rspiocbq) 5122 { 5123 struct lpfc_io_buf *lpfc_cmd = 5124 (struct lpfc_io_buf *) cmdiocbq->context1; 5125 if (lpfc_cmd) 5126 lpfc_release_scsi_buf(phba, lpfc_cmd); 5127 return; 5128 } 5129 5130 /** 5131 * lpfc_check_pci_resettable - Walks list of devices on pci_dev's bus to check 5132 * if issuing a pci_bus_reset is possibly unsafe 5133 * @phba: lpfc_hba pointer. 5134 * 5135 * Description: 5136 * Walks the bus_list to ensure only PCI devices with Emulex 5137 * vendor id, device ids that support hot reset, and only one occurrence 5138 * of function 0. 
5139 * 5140 * Returns: 5141 * -EBADSLT, detected invalid device 5142 * 0, successful 5143 */ 5144 int 5145 lpfc_check_pci_resettable(struct lpfc_hba *phba) 5146 { 5147 const struct pci_dev *pdev = phba->pcidev; 5148 struct pci_dev *ptr = NULL; 5149 u8 counter = 0; 5150 5151 /* Walk the list of devices on the pci_dev's bus */ 5152 list_for_each_entry(ptr, &pdev->bus->devices, bus_list) { 5153 /* Check for Emulex Vendor ID */ 5154 if (ptr->vendor != PCI_VENDOR_ID_EMULEX) { 5155 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5156 "8346 Non-Emulex vendor found: " 5157 "0x%04x\n", ptr->vendor); 5158 return -EBADSLT; 5159 } 5160 5161 /* Check for valid Emulex Device ID */ 5162 if (phba->sli_rev != LPFC_SLI_REV4 || 5163 phba->hba_flag & HBA_FCOE_MODE) { 5164 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5165 "8347 Incapable PCI reset device: " 5166 "0x%04x\n", ptr->device); 5167 return -EBADSLT; 5168 } 5169 5170 /* Check for only one function 0 ID to ensure only one HBA on 5171 * secondary bus 5172 */ 5173 if (ptr->devfn == 0) { 5174 if (++counter > 1) { 5175 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5176 "8348 More than one device on " 5177 "secondary bus found\n"); 5178 return -EBADSLT; 5179 } 5180 } 5181 } 5182 5183 return 0; 5184 } 5185 5186 /** 5187 * lpfc_info - Info entry point of scsi_host_template data structure 5188 * @host: The scsi host for which this call is being executed. 5189 * 5190 * This routine provides module information about hba. 5191 * 5192 * Reutrn code: 5193 * Pointer to char - Success. 5194 **/ 5195 const char * 5196 lpfc_info(struct Scsi_Host *host) 5197 { 5198 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata; 5199 struct lpfc_hba *phba = vport->phba; 5200 int link_speed = 0; 5201 static char lpfcinfobuf[384]; 5202 char tmp[384] = {0}; 5203 5204 memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf)); 5205 if (phba && phba->pcidev){ 5206 /* Model Description */ 5207 scnprintf(tmp, sizeof(tmp), phba->ModelDesc); 5208 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >= 5209 sizeof(lpfcinfobuf)) 5210 goto buffer_done; 5211 5212 /* PCI Info */ 5213 scnprintf(tmp, sizeof(tmp), 5214 " on PCI bus %02x device %02x irq %d", 5215 phba->pcidev->bus->number, phba->pcidev->devfn, 5216 phba->pcidev->irq); 5217 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >= 5218 sizeof(lpfcinfobuf)) 5219 goto buffer_done; 5220 5221 /* Port Number */ 5222 if (phba->Port[0]) { 5223 scnprintf(tmp, sizeof(tmp), " port %s", phba->Port); 5224 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >= 5225 sizeof(lpfcinfobuf)) 5226 goto buffer_done; 5227 } 5228 5229 /* Link Speed */ 5230 link_speed = lpfc_sli_port_speed_get(phba); 5231 if (link_speed != 0) { 5232 scnprintf(tmp, sizeof(tmp), 5233 " Logical Link Speed: %d Mbps", link_speed); 5234 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >= 5235 sizeof(lpfcinfobuf)) 5236 goto buffer_done; 5237 } 5238 5239 /* PCI resettable */ 5240 if (!lpfc_check_pci_resettable(phba)) { 5241 scnprintf(tmp, sizeof(tmp), " PCI resettable"); 5242 strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)); 5243 } 5244 } 5245 5246 buffer_done: 5247 return lpfcinfobuf; 5248 } 5249 5250 /** 5251 * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba 5252 * @phba: The Hba for which this call is being executed. 5253 * 5254 * This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo. 5255 * The default value of cfg_poll_tmo is 10 milliseconds. 
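 * The timer is only re-armed while commands remain outstanding on the
 * FCP ring txcmplq.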
5256 **/ 5257 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba) 5258 { 5259 unsigned long poll_tmo_expires = 5260 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo)); 5261 5262 if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq)) 5263 mod_timer(&phba->fcp_poll_timer, 5264 poll_tmo_expires); 5265 } 5266 5267 /** 5268 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA 5269 * @phba: The Hba for which this call is being executed. 5270 * 5271 * This routine starts the fcp_poll_timer of @phba. 5272 **/ 5273 void lpfc_poll_start_timer(struct lpfc_hba * phba) 5274 { 5275 lpfc_poll_rearm_timer(phba); 5276 } 5277 5278 /** 5279 * lpfc_poll_timeout - Restart polling timer 5280 * @t: Timer construct where lpfc_hba data structure pointer is obtained. 5281 * 5282 * This routine restarts fcp_poll timer, when FCP ring polling is enable 5283 * and FCP Ring interrupt is disable. 5284 **/ 5285 void lpfc_poll_timeout(struct timer_list *t) 5286 { 5287 struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer); 5288 5289 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 5290 lpfc_sli_handle_fast_ring_event(phba, 5291 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); 5292 5293 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 5294 lpfc_poll_rearm_timer(phba); 5295 } 5296 } 5297 5298 /* 5299 * lpfc_get_vmid_from_hashtable - search the UUID in the hash table 5300 * @vport: The virtual port for which this call is being executed. 5301 * @hash: calculated hash value 5302 * @buf: uuid associated with the VE 5303 * Return the VMID entry associated with the UUID 5304 * Make sure to acquire the appropriate lock before invoking this routine. 5305 */ 5306 struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport, 5307 u32 hash, u8 *buf) 5308 { 5309 struct lpfc_vmid *vmp; 5310 5311 hash_for_each_possible(vport->hash_table, vmp, hnode, hash) { 5312 if (memcmp(&vmp->host_vmid[0], buf, 16) == 0) 5313 return vmp; 5314 } 5315 return NULL; 5316 } 5317 5318 /* 5319 * lpfc_put_vmid_in_hashtable - put the VMID in the hash table 5320 * @vport: The virtual port for which this call is being executed. 5321 * @hash - calculated hash value 5322 * @vmp: Pointer to a VMID entry representing a VM sending I/O 5323 * 5324 * This routine will insert the newly acquired VMID entity in the hash table. 5325 * Make sure to acquire the appropriate lock before invoking this routine. 5326 */ 5327 static void 5328 lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash, 5329 struct lpfc_vmid *vmp) 5330 { 5331 hash_add(vport->hash_table, &vmp->hnode, hash); 5332 } 5333 5334 /* 5335 * lpfc_vmid_hash_fn - create a hash value of the UUID 5336 * @vmid: uuid associated with the VE 5337 * @len: length of the VMID string 5338 * Returns the calculated hash value 5339 */ 5340 int lpfc_vmid_hash_fn(const char *vmid, int len) 5341 { 5342 int c; 5343 int hash = 0; 5344 5345 if (len == 0) 5346 return 0; 5347 while (len--) { 5348 c = *vmid++; 5349 if (c >= 'A' && c <= 'Z') 5350 c += 'a' - 'A'; 5351 5352 hash = (hash + (c << LPFC_VMID_HASH_SHIFT) + 5353 (c >> LPFC_VMID_HASH_SHIFT)) * 19; 5354 } 5355 5356 return hash & LPFC_VMID_HASH_MASK; 5357 } 5358 5359 /* 5360 * lpfc_vmid_update_entry - update the vmid entry in the hash table 5361 * @vport: The virtual port for which this call is being executed. 
5362 * @cmd: address of scsi cmd descriptor 5363 * @vmp: Pointer to a VMID entry representing a VM sending I/O 5364 * @tag: VMID tag 5365 */ 5366 static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd 5367 *cmd, struct lpfc_vmid *vmp, 5368 union lpfc_vmid_io_tag *tag) 5369 { 5370 u64 *lta; 5371 5372 if (vport->vmid_priority_tagging) 5373 tag->cs_ctl_vmid = vmp->un.cs_ctl_vmid; 5374 else 5375 tag->app_id = vmp->un.app_id; 5376 5377 if (cmd->sc_data_direction == DMA_TO_DEVICE) 5378 vmp->io_wr_cnt++; 5379 else 5380 vmp->io_rd_cnt++; 5381 5382 /* update the last access timestamp in the table */ 5383 lta = per_cpu_ptr(vmp->last_io_time, raw_smp_processor_id()); 5384 *lta = jiffies; 5385 } 5386 5387 static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport, 5388 struct lpfc_vmid *vmid) 5389 { 5390 u32 hash; 5391 struct lpfc_vmid *pvmid; 5392 5393 if (vport->port_type == LPFC_PHYSICAL_PORT) { 5394 vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport); 5395 } else { 5396 hash = lpfc_vmid_hash_fn(vmid->host_vmid, vmid->vmid_len); 5397 pvmid = 5398 lpfc_get_vmid_from_hashtable(vport->phba->pport, hash, 5399 vmid->host_vmid); 5400 if (pvmid) 5401 vmid->un.cs_ctl_vmid = pvmid->un.cs_ctl_vmid; 5402 else 5403 vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport); 5404 } 5405 } 5406 5407 /* 5408 * lpfc_vmid_get_appid - get the VMID associated with the UUID 5409 * @vport: The virtual port for which this call is being executed. 5410 * @uuid: UUID associated with the VE 5411 * @cmd: address of scsi_cmd descriptor 5412 * @tag: VMID tag 5413 * Returns status of the function 5414 */ 5415 static int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, struct 5416 scsi_cmnd * cmd, union lpfc_vmid_io_tag *tag) 5417 { 5418 struct lpfc_vmid *vmp = NULL; 5419 int hash, len, rc, i; 5420 5421 /* check if QFPA is complete */ 5422 if (lpfc_vmid_is_type_priority_tag(vport) && !(vport->vmid_flag & 5423 LPFC_VMID_QFPA_CMPL)) { 5424 vport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA; 5425 return -EAGAIN; 5426 } 5427 5428 /* search if the UUID has already been mapped to the VMID */ 5429 len = strlen(uuid); 5430 hash = lpfc_vmid_hash_fn(uuid, len); 5431 5432 /* search for the VMID in the table */ 5433 read_lock(&vport->vmid_lock); 5434 vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid); 5435 5436 /* if found, check if its already registered */ 5437 if (vmp && vmp->flag & LPFC_VMID_REGISTERED) { 5438 read_unlock(&vport->vmid_lock); 5439 lpfc_vmid_update_entry(vport, cmd, vmp, tag); 5440 rc = 0; 5441 } else if (vmp && (vmp->flag & LPFC_VMID_REQ_REGISTER || 5442 vmp->flag & LPFC_VMID_DE_REGISTER)) { 5443 /* else if register or dereg request has already been sent */ 5444 /* Hence VMID tag will not be added for this I/O */ 5445 read_unlock(&vport->vmid_lock); 5446 rc = -EBUSY; 5447 } else { 5448 /* The VMID was not found in the hashtable. 
At this point, */ 5449 /* drop the read lock first before proceeding further */ 5450 read_unlock(&vport->vmid_lock); 5451 /* start the process to obtain one as per the */ 5452 /* type of the VMID indicated */ 5453 write_lock(&vport->vmid_lock); 5454 vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid); 5455 5456 /* while the read lock was released, in case the entry was */ 5457 /* added by other context or is in process of being added */ 5458 if (vmp && vmp->flag & LPFC_VMID_REGISTERED) { 5459 lpfc_vmid_update_entry(vport, cmd, vmp, tag); 5460 write_unlock(&vport->vmid_lock); 5461 return 0; 5462 } else if (vmp && vmp->flag & LPFC_VMID_REQ_REGISTER) { 5463 write_unlock(&vport->vmid_lock); 5464 return -EBUSY; 5465 } 5466 5467 /* else search and allocate a free slot in the hash table */ 5468 if (vport->cur_vmid_cnt < vport->max_vmid) { 5469 for (i = 0; i < vport->max_vmid; i++) { 5470 vmp = vport->vmid + i; 5471 if (vmp->flag == LPFC_VMID_SLOT_FREE) 5472 break; 5473 } 5474 if (i == vport->max_vmid) 5475 vmp = NULL; 5476 } else { 5477 vmp = NULL; 5478 } 5479 5480 if (!vmp) { 5481 write_unlock(&vport->vmid_lock); 5482 return -ENOMEM; 5483 } 5484 5485 /* Add the vmid and register */ 5486 lpfc_put_vmid_in_hashtable(vport, hash, vmp); 5487 vmp->vmid_len = len; 5488 memcpy(vmp->host_vmid, uuid, vmp->vmid_len); 5489 vmp->io_rd_cnt = 0; 5490 vmp->io_wr_cnt = 0; 5491 vmp->flag = LPFC_VMID_SLOT_USED; 5492 5493 vmp->delete_inactive = 5494 vport->vmid_inactivity_timeout ? 1 : 0; 5495 5496 /* if type priority tag, get next available VMID */ 5497 if (lpfc_vmid_is_type_priority_tag(vport)) 5498 lpfc_vmid_assign_cs_ctl(vport, vmp); 5499 5500 /* allocate the per cpu variable for holding */ 5501 /* the last access time stamp only if VMID is enabled */ 5502 if (!vmp->last_io_time) 5503 vmp->last_io_time = __alloc_percpu(sizeof(u64), 5504 __alignof__(struct 5505 lpfc_vmid)); 5506 if (!vmp->last_io_time) { 5507 hash_del(&vmp->hnode); 5508 vmp->flag = LPFC_VMID_SLOT_FREE; 5509 write_unlock(&vport->vmid_lock); 5510 return -EIO; 5511 } 5512 5513 write_unlock(&vport->vmid_lock); 5514 5515 /* complete transaction with switch */ 5516 if (lpfc_vmid_is_type_priority_tag(vport)) 5517 rc = lpfc_vmid_uvem(vport, vmp, true); 5518 else 5519 rc = lpfc_vmid_cmd(vport, SLI_CTAS_RAPP_IDENT, vmp); 5520 if (!rc) { 5521 write_lock(&vport->vmid_lock); 5522 vport->cur_vmid_cnt++; 5523 vmp->flag |= LPFC_VMID_REQ_REGISTER; 5524 write_unlock(&vport->vmid_lock); 5525 } else { 5526 write_lock(&vport->vmid_lock); 5527 hash_del(&vmp->hnode); 5528 vmp->flag = LPFC_VMID_SLOT_FREE; 5529 free_percpu(vmp->last_io_time); 5530 write_unlock(&vport->vmid_lock); 5531 return -EIO; 5532 } 5533 5534 /* finally, enable the idle timer once */ 5535 if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) { 5536 mod_timer(&vport->phba->inactive_vmid_poll, 5537 jiffies + 5538 msecs_to_jiffies(1000 * LPFC_VMID_TIMER)); 5539 vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD; 5540 } 5541 } 5542 return rc; 5543 } 5544 5545 /* 5546 * lpfc_is_command_vm_io - get the UUID from blk cgroup 5547 * @cmd: Pointer to scsi_cmnd data structure 5548 * Returns UUID if present, otherwise NULL 5549 */ 5550 static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd) 5551 { 5552 struct bio *bio = scsi_cmd_to_rq(cmd)->bio; 5553 5554 return bio ? blkcg_get_fc_appid(bio) : NULL; 5555 } 5556 5557 /** 5558 * lpfc_queuecommand - scsi_host_template queuecommand entry point 5559 * @shost: kernel scsi host pointer. 5560 * @cmnd: Pointer to scsi_cmnd data structure. 
 *
 * The SCSI midlayer calls this routine to submit @cmnd for processing.
 * The routine builds an IOCB from the SCSI command and hands it to the
 * firmware; completion is reported back to the midlayer through
 * cmnd->scsi_done() once the driver has finished processing the command.
 *
 * Return value :
 *   0 - Success
 *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
 **/
static int
lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_io_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err, idx;
	u8 *uuid = NULL;
	uint64_t start;

	/*
	 * Timestamp the submission once; the value feeds the CMF latency
	 * accounting (rx_cmd_start) and, when CONFIG_SCSI_LPFC_DEBUG_FS is
	 * enabled, the ktime I/O profiling fields as well.
	 */
	start = ktime_get_ns();
	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);

	/* sanity check on references */
	if (unlikely(!rdata) || unlikely(!rport))
		goto out_fail_command;

	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}
	ndlp = rdata->pnode;

	if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
	    (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {

		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
				" op:%02x str=%s without registering for"
				" BlockGuard - Rejecting command\n",
				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
				dif_op_str[scsi_get_prot_op(cmnd)]);
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
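	 * When the node is already gone, the command is returned with
	 * SCSI_MLQUEUE_TARGET_BUSY so the midlayer will retry it later.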
5616 */ 5617 if (!ndlp) 5618 goto out_tgt_busy1; 5619 5620 /* Check if IO qualifies for CMF */ 5621 if (phba->cmf_active_mode != LPFC_CFG_OFF && 5622 cmnd->sc_data_direction == DMA_FROM_DEVICE && 5623 (scsi_sg_count(cmnd))) { 5624 /* Latency start time saved in rx_cmd_start later in routine */ 5625 err = lpfc_update_cmf_cmd(phba, scsi_bufflen(cmnd)); 5626 if (err) 5627 goto out_tgt_busy1; 5628 } 5629 5630 if (lpfc_ndlp_check_qdepth(phba, ndlp)) { 5631 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) { 5632 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR, 5633 "3377 Target Queue Full, scsi Id:%d " 5634 "Qdepth:%d Pending command:%d" 5635 " WWNN:%02x:%02x:%02x:%02x:" 5636 "%02x:%02x:%02x:%02x, " 5637 " WWPN:%02x:%02x:%02x:%02x:" 5638 "%02x:%02x:%02x:%02x", 5639 ndlp->nlp_sid, ndlp->cmd_qdepth, 5640 atomic_read(&ndlp->cmd_pending), 5641 ndlp->nlp_nodename.u.wwn[0], 5642 ndlp->nlp_nodename.u.wwn[1], 5643 ndlp->nlp_nodename.u.wwn[2], 5644 ndlp->nlp_nodename.u.wwn[3], 5645 ndlp->nlp_nodename.u.wwn[4], 5646 ndlp->nlp_nodename.u.wwn[5], 5647 ndlp->nlp_nodename.u.wwn[6], 5648 ndlp->nlp_nodename.u.wwn[7], 5649 ndlp->nlp_portname.u.wwn[0], 5650 ndlp->nlp_portname.u.wwn[1], 5651 ndlp->nlp_portname.u.wwn[2], 5652 ndlp->nlp_portname.u.wwn[3], 5653 ndlp->nlp_portname.u.wwn[4], 5654 ndlp->nlp_portname.u.wwn[5], 5655 ndlp->nlp_portname.u.wwn[6], 5656 ndlp->nlp_portname.u.wwn[7]); 5657 goto out_tgt_busy2; 5658 } 5659 } 5660 5661 lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd); 5662 if (lpfc_cmd == NULL) { 5663 lpfc_rampdown_queue_depth(phba); 5664 5665 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR, 5666 "0707 driver's buffer pool is empty, " 5667 "IO busied\n"); 5668 goto out_host_busy; 5669 } 5670 lpfc_cmd->rx_cmd_start = start; 5671 5672 /* 5673 * Store the midlayer's command structure for the completion phase 5674 * and complete the command initialization. 
5675 */ 5676 lpfc_cmd->pCmd = cmnd; 5677 lpfc_cmd->rdata = rdata; 5678 lpfc_cmd->ndlp = ndlp; 5679 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL; 5680 cmnd->host_scribble = (unsigned char *)lpfc_cmd; 5681 5682 err = lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); 5683 if (err) 5684 goto out_host_busy_release_buf; 5685 5686 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { 5687 if (vport->phba->cfg_enable_bg) { 5688 lpfc_printf_vlog(vport, 5689 KERN_INFO, LOG_SCSI_CMD, 5690 "9033 BLKGRD: rcvd %s cmd:x%x " 5691 "reftag x%x cnt %u pt %x\n", 5692 dif_op_str[scsi_get_prot_op(cmnd)], 5693 cmnd->cmnd[0], 5694 scsi_prot_ref_tag(cmnd), 5695 scsi_logical_block_count(cmnd), 5696 (cmnd->cmnd[1]>>5)); 5697 } 5698 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 5699 } else { 5700 if (vport->phba->cfg_enable_bg) { 5701 lpfc_printf_vlog(vport, 5702 KERN_INFO, LOG_SCSI_CMD, 5703 "9038 BLKGRD: rcvd PROT_NORMAL cmd: " 5704 "x%x reftag x%x cnt %u pt %x\n", 5705 cmnd->cmnd[0], 5706 scsi_prot_ref_tag(cmnd), 5707 scsi_logical_block_count(cmnd), 5708 (cmnd->cmnd[1]>>5)); 5709 } 5710 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); 5711 } 5712 5713 if (unlikely(err)) { 5714 if (err == 2) { 5715 cmnd->result = DID_ERROR << 16; 5716 goto out_fail_command_release_buf; 5717 } 5718 goto out_host_busy_free_buf; 5719 } 5720 5721 5722 /* check the necessary and sufficient condition to support VMID */ 5723 if (lpfc_is_vmid_enabled(phba) && 5724 (ndlp->vmid_support || 5725 phba->pport->vmid_priority_tagging == 5726 LPFC_VMID_PRIO_TAG_ALL_TARGETS)) { 5727 /* is the I/O generated by a VM, get the associated virtual */ 5728 /* entity id */ 5729 uuid = lpfc_is_command_vm_io(cmnd); 5730 5731 if (uuid) { 5732 err = lpfc_vmid_get_appid(vport, uuid, cmnd, 5733 (union lpfc_vmid_io_tag *) 5734 &lpfc_cmd->cur_iocbq.vmid_tag); 5735 if (!err) 5736 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_VMID; 5737 } 5738 } 5739 5740 atomic_inc(&ndlp->cmd_pending); 5741 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 5742 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) 5743 this_cpu_inc(phba->sli4_hba.c_stat->xmt_io); 5744 #endif 5745 /* Issue I/O to adapter */ 5746 err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING, 5747 &lpfc_cmd->cur_iocbq, 5748 SLI_IOCB_RET_IOCB); 5749 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 5750 if (start) { 5751 lpfc_cmd->ts_cmd_start = start; 5752 lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd; 5753 lpfc_cmd->ts_cmd_wqput = ktime_get_ns(); 5754 } else { 5755 lpfc_cmd->ts_cmd_start = 0; 5756 } 5757 #endif 5758 if (err) { 5759 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5760 "3376 FCP could not issue IOCB err %x " 5761 "FCP cmd x%x <%d/%llu> " 5762 "sid: x%x did: x%x oxid: x%x " 5763 "Data: x%x x%x x%x x%x\n", 5764 err, cmnd->cmnd[0], 5765 cmnd->device ? cmnd->device->id : 0xffff, 5766 cmnd->device ? cmnd->device->lun : (u64)-1, 5767 vport->fc_myDID, ndlp->nlp_DID, 5768 phba->sli_rev == LPFC_SLI_REV4 ? 5769 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff, 5770 phba->sli_rev == LPFC_SLI_REV4 ? 5771 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] : 5772 lpfc_cmd->cur_iocbq.iocb.ulpContext, 5773 lpfc_cmd->cur_iocbq.iotag, 5774 phba->sli_rev == LPFC_SLI_REV4 ? 
5775 bf_get(wqe_tmo, 5776 &lpfc_cmd->cur_iocbq.wqe.generic.wqe_com) : 5777 lpfc_cmd->cur_iocbq.iocb.ulpTimeout, 5778 (uint32_t)(scsi_cmd_to_rq(cmnd)->timeout / 1000)); 5779 5780 goto out_host_busy_free_buf; 5781 } 5782 5783 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 5784 lpfc_sli_handle_fast_ring_event(phba, 5785 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); 5786 5787 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 5788 lpfc_poll_rearm_timer(phba); 5789 } 5790 5791 if (phba->cfg_xri_rebalancing) 5792 lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no); 5793 5794 return 0; 5795 5796 out_host_busy_free_buf: 5797 idx = lpfc_cmd->hdwq_no; 5798 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 5799 if (phba->sli4_hba.hdwq) { 5800 switch (lpfc_cmd->fcp_cmnd->fcpCntl3) { 5801 case WRITE_DATA: 5802 phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--; 5803 break; 5804 case READ_DATA: 5805 phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--; 5806 break; 5807 default: 5808 phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--; 5809 } 5810 } 5811 out_host_busy_release_buf: 5812 lpfc_release_scsi_buf(phba, lpfc_cmd); 5813 out_host_busy: 5814 lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd), 5815 shost); 5816 return SCSI_MLQUEUE_HOST_BUSY; 5817 5818 out_tgt_busy2: 5819 lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd), 5820 shost); 5821 out_tgt_busy1: 5822 return SCSI_MLQUEUE_TARGET_BUSY; 5823 5824 out_fail_command_release_buf: 5825 lpfc_release_scsi_buf(phba, lpfc_cmd); 5826 lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd), 5827 shost); 5828 5829 out_fail_command: 5830 cmnd->scsi_done(cmnd); 5831 return 0; 5832 } 5833 5834 /* 5835 * lpfc_vmid_vport_cleanup - cleans up the resources associated with a vport 5836 * @vport: The virtual port for which this call is being executed. 5837 */ 5838 void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport) 5839 { 5840 u32 bucket; 5841 struct lpfc_vmid *cur; 5842 5843 if (vport->port_type == LPFC_PHYSICAL_PORT) 5844 del_timer_sync(&vport->phba->inactive_vmid_poll); 5845 5846 kfree(vport->qfpa_res); 5847 kfree(vport->vmid_priority.vmid_range); 5848 kfree(vport->vmid); 5849 5850 if (!hash_empty(vport->hash_table)) 5851 hash_for_each(vport->hash_table, bucket, cur, hnode) 5852 hash_del(&cur->hnode); 5853 5854 vport->qfpa_res = NULL; 5855 vport->vmid_priority.vmid_range = NULL; 5856 vport->vmid = NULL; 5857 vport->cur_vmid_cnt = 0; 5858 } 5859 5860 /** 5861 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point 5862 * @cmnd: Pointer to scsi_cmnd data structure. 5863 * 5864 * This routine aborts @cmnd pending in base driver. 
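 * The routine looks up the lpfc_io_buf attached to @cmnd, issues an abort
 * for its outstanding iotag, and then waits (bounded by twice the devloss
 * timeout) for the completion handler to clear pCmd before reporting the
 * result back to the midlayer.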
5865 * 5866 * Return code : 5867 * 0x2003 - Error 5868 * 0x2002 - Success 5869 **/ 5870 static int 5871 lpfc_abort_handler(struct scsi_cmnd *cmnd) 5872 { 5873 struct Scsi_Host *shost = cmnd->device->host; 5874 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 5875 struct lpfc_hba *phba = vport->phba; 5876 struct lpfc_iocbq *iocb; 5877 struct lpfc_io_buf *lpfc_cmd; 5878 int ret = SUCCESS, status = 0; 5879 struct lpfc_sli_ring *pring_s4 = NULL; 5880 struct lpfc_sli_ring *pring = NULL; 5881 int ret_val; 5882 unsigned long flags; 5883 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); 5884 5885 status = fc_block_scsi_eh(cmnd); 5886 if (status != 0 && status != SUCCESS) 5887 return status; 5888 5889 lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble; 5890 if (!lpfc_cmd) 5891 return ret; 5892 5893 spin_lock_irqsave(&phba->hbalock, flags); 5894 /* driver queued commands are in process of being flushed */ 5895 if (phba->hba_flag & HBA_IOQ_FLUSH) { 5896 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5897 "3168 SCSI Layer abort requested I/O has been " 5898 "flushed by LLD.\n"); 5899 ret = FAILED; 5900 goto out_unlock; 5901 } 5902 5903 /* Guard against IO completion being called at same time */ 5904 spin_lock(&lpfc_cmd->buf_lock); 5905 5906 if (!lpfc_cmd->pCmd) { 5907 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5908 "2873 SCSI Layer I/O Abort Request IO CMPL Status " 5909 "x%x ID %d LUN %llu\n", 5910 SUCCESS, cmnd->device->id, cmnd->device->lun); 5911 goto out_unlock_buf; 5912 } 5913 5914 iocb = &lpfc_cmd->cur_iocbq; 5915 if (phba->sli_rev == LPFC_SLI_REV4) { 5916 pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring; 5917 if (!pring_s4) { 5918 ret = FAILED; 5919 goto out_unlock_buf; 5920 } 5921 spin_lock(&pring_s4->ring_lock); 5922 } 5923 /* the command is in process of being cancelled */ 5924 if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { 5925 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5926 "3169 SCSI Layer abort requested I/O has been " 5927 "cancelled by LLD.\n"); 5928 ret = FAILED; 5929 goto out_unlock_ring; 5930 } 5931 /* 5932 * If pCmd field of the corresponding lpfc_io_buf structure 5933 * points to a different SCSI command, then the driver has 5934 * already completed this command, but the midlayer did not 5935 * see the completion before the eh fired. Just return SUCCESS. 5936 */ 5937 if (lpfc_cmd->pCmd != cmnd) { 5938 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5939 "3170 SCSI Layer abort requested I/O has been " 5940 "completed by LLD.\n"); 5941 goto out_unlock_ring; 5942 } 5943 5944 BUG_ON(iocb->context1 != lpfc_cmd); 5945 5946 /* abort issued in recovery is still in progress */ 5947 if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) { 5948 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5949 "3389 SCSI Layer I/O Abort Request is pending\n"); 5950 if (phba->sli_rev == LPFC_SLI_REV4) 5951 spin_unlock(&pring_s4->ring_lock); 5952 spin_unlock(&lpfc_cmd->buf_lock); 5953 spin_unlock_irqrestore(&phba->hbalock, flags); 5954 goto wait_for_cmpl; 5955 } 5956 5957 lpfc_cmd->waitq = &waitq; 5958 if (phba->sli_rev == LPFC_SLI_REV4) { 5959 spin_unlock(&pring_s4->ring_lock); 5960 ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb, 5961 lpfc_sli4_abort_fcp_cmpl); 5962 } else { 5963 pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; 5964 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb, 5965 lpfc_sli_abort_fcp_cmpl); 5966 } 5967 5968 /* Make sure HBA is alive */ 5969 lpfc_issue_hb_tmo(phba); 5970 5971 if (ret_val != IOCB_SUCCESS) { 5972 /* Indicate the IO is not being aborted by the driver. 
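		 * Clearing waitq ensures the completion path will not try to
		 * wake an on-stack wait queue after this handler returns.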
*/ 5973 lpfc_cmd->waitq = NULL; 5974 spin_unlock(&lpfc_cmd->buf_lock); 5975 spin_unlock_irqrestore(&phba->hbalock, flags); 5976 ret = FAILED; 5977 goto out; 5978 } 5979 5980 /* no longer need the lock after this point */ 5981 spin_unlock(&lpfc_cmd->buf_lock); 5982 spin_unlock_irqrestore(&phba->hbalock, flags); 5983 5984 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 5985 lpfc_sli_handle_fast_ring_event(phba, 5986 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); 5987 5988 wait_for_cmpl: 5989 /* 5990 * iocb_flag is set to LPFC_DRIVER_ABORTED before we wait 5991 * for abort to complete. 5992 */ 5993 wait_event_timeout(waitq, 5994 (lpfc_cmd->pCmd != cmnd), 5995 msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000)); 5996 5997 spin_lock(&lpfc_cmd->buf_lock); 5998 5999 if (lpfc_cmd->pCmd == cmnd) { 6000 ret = FAILED; 6001 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6002 "0748 abort handler timed out waiting " 6003 "for aborting I/O (xri:x%x) to complete: " 6004 "ret %#x, ID %d, LUN %llu\n", 6005 iocb->sli4_xritag, ret, 6006 cmnd->device->id, cmnd->device->lun); 6007 } 6008 6009 lpfc_cmd->waitq = NULL; 6010 6011 spin_unlock(&lpfc_cmd->buf_lock); 6012 goto out; 6013 6014 out_unlock_ring: 6015 if (phba->sli_rev == LPFC_SLI_REV4) 6016 spin_unlock(&pring_s4->ring_lock); 6017 out_unlock_buf: 6018 spin_unlock(&lpfc_cmd->buf_lock); 6019 out_unlock: 6020 spin_unlock_irqrestore(&phba->hbalock, flags); 6021 out: 6022 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 6023 "0749 SCSI Layer I/O Abort Request Status x%x ID %d " 6024 "LUN %llu\n", ret, cmnd->device->id, 6025 cmnd->device->lun); 6026 return ret; 6027 } 6028 6029 static char * 6030 lpfc_taskmgmt_name(uint8_t task_mgmt_cmd) 6031 { 6032 switch (task_mgmt_cmd) { 6033 case FCP_ABORT_TASK_SET: 6034 return "ABORT_TASK_SET"; 6035 case FCP_CLEAR_TASK_SET: 6036 return "FCP_CLEAR_TASK_SET"; 6037 case FCP_BUS_RESET: 6038 return "FCP_BUS_RESET"; 6039 case FCP_LUN_RESET: 6040 return "FCP_LUN_RESET"; 6041 case FCP_TARGET_RESET: 6042 return "FCP_TARGET_RESET"; 6043 case FCP_CLEAR_ACA: 6044 return "FCP_CLEAR_ACA"; 6045 case FCP_TERMINATE_TASK: 6046 return "FCP_TERMINATE_TASK"; 6047 default: 6048 return "unknown"; 6049 } 6050 } 6051 6052 6053 /** 6054 * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed 6055 * @vport: The virtual port for which this call is being executed. 6056 * @lpfc_cmd: Pointer to lpfc_io_buf data structure. 6057 * 6058 * This routine checks the FCP RSP INFO to see if the tsk mgmt command succeded 6059 * 6060 * Return code : 6061 * 0x2003 - Error 6062 * 0x2002 - Success 6063 **/ 6064 static int 6065 lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd) 6066 { 6067 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; 6068 uint32_t rsp_info; 6069 uint32_t rsp_len; 6070 uint8_t rsp_info_code; 6071 int ret = FAILED; 6072 6073 6074 if (fcprsp == NULL) 6075 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 6076 "0703 fcp_rsp is missing\n"); 6077 else { 6078 rsp_info = fcprsp->rspStatus2; 6079 rsp_len = be32_to_cpu(fcprsp->rspRspLen); 6080 rsp_info_code = fcprsp->rspInfo3; 6081 6082 6083 lpfc_printf_vlog(vport, KERN_INFO, 6084 LOG_FCP, 6085 "0706 fcp_rsp valid 0x%x," 6086 " rsp len=%d code 0x%x\n", 6087 rsp_info, 6088 rsp_len, rsp_info_code); 6089 6090 /* If FCP_RSP_LEN_VALID bit is one, then the FCP_RSP_LEN 6091 * field specifies the number of valid bytes of FCP_RSP_INFO. 
6092 * The FCP_RSP_LEN field shall be set to 0x04 or 0x08 6093 */ 6094 if ((fcprsp->rspStatus2 & RSP_LEN_VALID) && 6095 ((rsp_len == 8) || (rsp_len == 4))) { 6096 switch (rsp_info_code) { 6097 case RSP_NO_FAILURE: 6098 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 6099 "0715 Task Mgmt No Failure\n"); 6100 ret = SUCCESS; 6101 break; 6102 case RSP_TM_NOT_SUPPORTED: /* TM rejected */ 6103 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 6104 "0716 Task Mgmt Target " 6105 "reject\n"); 6106 break; 6107 case RSP_TM_NOT_COMPLETED: /* TM failed */ 6108 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 6109 "0717 Task Mgmt Target " 6110 "failed TM\n"); 6111 break; 6112 case RSP_TM_INVALID_LU: /* TM to invalid LU! */ 6113 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 6114 "0718 Task Mgmt to invalid " 6115 "LUN\n"); 6116 break; 6117 } 6118 } 6119 } 6120 return ret; 6121 } 6122 6123 6124 /** 6125 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler 6126 * @vport: The virtual port for which this call is being executed. 6127 * @cmnd: Pointer to scsi_cmnd data structure. 6128 * @tgt_id: Target ID of remote device. 6129 * @lun_id: Lun number for the TMF 6130 * @task_mgmt_cmd: type of TMF to send 6131 * 6132 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to 6133 * a remote port. 6134 * 6135 * Return Code: 6136 * 0x2003 - Error 6137 * 0x2002 - Success. 6138 **/ 6139 static int 6140 lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd, 6141 unsigned int tgt_id, uint64_t lun_id, 6142 uint8_t task_mgmt_cmd) 6143 { 6144 struct lpfc_hba *phba = vport->phba; 6145 struct lpfc_io_buf *lpfc_cmd; 6146 struct lpfc_iocbq *iocbq; 6147 struct lpfc_iocbq *iocbqrsp; 6148 struct lpfc_rport_data *rdata; 6149 struct lpfc_nodelist *pnode; 6150 int ret; 6151 int status; 6152 6153 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 6154 if (!rdata || !rdata->pnode) 6155 return FAILED; 6156 pnode = rdata->pnode; 6157 6158 lpfc_cmd = lpfc_get_scsi_buf(phba, pnode, NULL); 6159 if (lpfc_cmd == NULL) 6160 return FAILED; 6161 lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo; 6162 lpfc_cmd->rdata = rdata; 6163 lpfc_cmd->pCmd = cmnd; 6164 lpfc_cmd->ndlp = pnode; 6165 6166 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id, 6167 task_mgmt_cmd); 6168 if (!status) { 6169 lpfc_release_scsi_buf(phba, lpfc_cmd); 6170 return FAILED; 6171 } 6172 6173 iocbq = &lpfc_cmd->cur_iocbq; 6174 iocbqrsp = lpfc_sli_get_iocbq(phba); 6175 if (iocbqrsp == NULL) { 6176 lpfc_release_scsi_buf(phba, lpfc_cmd); 6177 return FAILED; 6178 } 6179 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; 6180 6181 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 6182 "0702 Issue %s to TGT %d LUN %llu " 6183 "rpi x%x nlp_flag x%x Data: x%x x%x\n", 6184 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id, 6185 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag, 6186 iocbq->iocb_flag); 6187 6188 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, 6189 iocbq, iocbqrsp, lpfc_cmd->timeout); 6190 if ((status != IOCB_SUCCESS) || 6191 (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) { 6192 if (status != IOCB_SUCCESS || 6193 iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR) 6194 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6195 "0727 TMF %s to TGT %d LUN %llu " 6196 "failed (%d, %d) iocb_flag x%x\n", 6197 lpfc_taskmgmt_name(task_mgmt_cmd), 6198 tgt_id, lun_id, 6199 iocbqrsp->iocb.ulpStatus, 6200 iocbqrsp->iocb.un.ulpWord[4], 6201 iocbq->iocb_flag); 6202 /* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */ 6203 if (status == IOCB_SUCCESS) { 6204 if 
(iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) 6205 /* Something in the FCP_RSP was invalid. 6206 * Check conditions */ 6207 ret = lpfc_check_fcp_rsp(vport, lpfc_cmd); 6208 else 6209 ret = FAILED; 6210 } else if (status == IOCB_TIMEDOUT) { 6211 ret = TIMEOUT_ERROR; 6212 } else { 6213 ret = FAILED; 6214 } 6215 } else 6216 ret = SUCCESS; 6217 6218 lpfc_sli_release_iocbq(phba, iocbqrsp); 6219 6220 if (ret != TIMEOUT_ERROR) 6221 lpfc_release_scsi_buf(phba, lpfc_cmd); 6222 6223 return ret; 6224 } 6225 6226 /** 6227 * lpfc_chk_tgt_mapped - 6228 * @vport: The virtual port to check on 6229 * @cmnd: Pointer to scsi_cmnd data structure. 6230 * 6231 * This routine delays until the scsi target (aka rport) for the 6232 * command exists (is present and logged in) or we declare it non-existent. 6233 * 6234 * Return code : 6235 * 0x2003 - Error 6236 * 0x2002 - Success 6237 **/ 6238 static int 6239 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd) 6240 { 6241 struct lpfc_rport_data *rdata; 6242 struct lpfc_nodelist *pnode; 6243 unsigned long later; 6244 6245 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 6246 if (!rdata) { 6247 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 6248 "0797 Tgt Map rport failure: rdata x%px\n", rdata); 6249 return FAILED; 6250 } 6251 pnode = rdata->pnode; 6252 /* 6253 * If target is not in a MAPPED state, delay until 6254 * target is rediscovered or devloss timeout expires. 6255 */ 6256 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; 6257 while (time_after(later, jiffies)) { 6258 if (!pnode) 6259 return FAILED; 6260 if (pnode->nlp_state == NLP_STE_MAPPED_NODE) 6261 return SUCCESS; 6262 schedule_timeout_uninterruptible(msecs_to_jiffies(500)); 6263 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 6264 if (!rdata) 6265 return FAILED; 6266 pnode = rdata->pnode; 6267 } 6268 if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) 6269 return FAILED; 6270 return SUCCESS; 6271 } 6272 6273 /** 6274 * lpfc_reset_flush_io_context - 6275 * @vport: The virtual port (scsi_host) for the flush context 6276 * @tgt_id: If aborting by Target contect - specifies the target id 6277 * @lun_id: If aborting by Lun context - specifies the lun id 6278 * @context: specifies the context level to flush at. 6279 * 6280 * After a reset condition via TMF, we need to flush orphaned i/o 6281 * contexts from the adapter. This routine aborts any contexts 6282 * outstanding, then waits for their completions. The wait is 6283 * bounded by devloss_tmo though. 6284 * 6285 * Return code : 6286 * 0x2003 - Error 6287 * 0x2002 - Success 6288 **/ 6289 static int 6290 lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id, 6291 uint64_t lun_id, lpfc_ctx_cmd context) 6292 { 6293 struct lpfc_hba *phba = vport->phba; 6294 unsigned long later; 6295 int cnt; 6296 6297 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); 6298 if (cnt) 6299 lpfc_sli_abort_taskmgmt(vport, 6300 &phba->sli.sli3_ring[LPFC_FCP_RING], 6301 tgt_id, lun_id, context); 6302 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; 6303 while (time_after(later, jiffies) && cnt) { 6304 schedule_timeout_uninterruptible(msecs_to_jiffies(20)); 6305 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); 6306 } 6307 if (cnt) { 6308 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6309 "0724 I/O flush failure for context %s : cnt x%x\n", 6310 ((context == LPFC_CTX_LUN) ? "LUN" : 6311 ((context == LPFC_CTX_TGT) ? "TGT" : 6312 ((context == LPFC_CTX_HOST) ? 
"HOST" : "Unknown"))), 6313 cnt); 6314 return FAILED; 6315 } 6316 return SUCCESS; 6317 } 6318 6319 /** 6320 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point 6321 * @cmnd: Pointer to scsi_cmnd data structure. 6322 * 6323 * This routine does a device reset by sending a LUN_RESET task management 6324 * command. 6325 * 6326 * Return code : 6327 * 0x2003 - Error 6328 * 0x2002 - Success 6329 **/ 6330 static int 6331 lpfc_device_reset_handler(struct scsi_cmnd *cmnd) 6332 { 6333 struct Scsi_Host *shost = cmnd->device->host; 6334 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6335 struct lpfc_rport_data *rdata; 6336 struct lpfc_nodelist *pnode; 6337 unsigned tgt_id = cmnd->device->id; 6338 uint64_t lun_id = cmnd->device->lun; 6339 struct lpfc_scsi_event_header scsi_event; 6340 int status; 6341 u32 logit = LOG_FCP; 6342 6343 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 6344 if (!rdata || !rdata->pnode) { 6345 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6346 "0798 Device Reset rdata failure: rdata x%px\n", 6347 rdata); 6348 return FAILED; 6349 } 6350 pnode = rdata->pnode; 6351 status = fc_block_scsi_eh(cmnd); 6352 if (status != 0 && status != SUCCESS) 6353 return status; 6354 6355 status = lpfc_chk_tgt_mapped(vport, cmnd); 6356 if (status == FAILED) { 6357 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6358 "0721 Device Reset rport failure: rdata x%px\n", rdata); 6359 return FAILED; 6360 } 6361 6362 scsi_event.event_type = FC_REG_SCSI_EVENT; 6363 scsi_event.subcategory = LPFC_EVENT_LUNRESET; 6364 scsi_event.lun = lun_id; 6365 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); 6366 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); 6367 6368 fc_host_post_vendor_event(shost, fc_get_event_number(), 6369 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 6370 6371 status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id, 6372 FCP_LUN_RESET); 6373 if (status != SUCCESS) 6374 logit = LOG_TRACE_EVENT; 6375 6376 lpfc_printf_vlog(vport, KERN_ERR, logit, 6377 "0713 SCSI layer issued Device Reset (%d, %llu) " 6378 "return x%x\n", tgt_id, lun_id, status); 6379 6380 /* 6381 * We have to clean up i/o as : they may be orphaned by the TMF; 6382 * or if the TMF failed, they may be in an indeterminate state. 6383 * So, continue on. 6384 * We will report success if all the i/o aborts successfully. 6385 */ 6386 if (status == SUCCESS) 6387 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, 6388 LPFC_CTX_LUN); 6389 6390 return status; 6391 } 6392 6393 /** 6394 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point 6395 * @cmnd: Pointer to scsi_cmnd data structure. 6396 * 6397 * This routine does a target reset by sending a TARGET_RESET task management 6398 * command. 
6399 * 6400 * Return code : 6401 * 0x2003 - Error 6402 * 0x2002 - Success 6403 **/ 6404 static int 6405 lpfc_target_reset_handler(struct scsi_cmnd *cmnd) 6406 { 6407 struct Scsi_Host *shost = cmnd->device->host; 6408 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6409 struct lpfc_rport_data *rdata; 6410 struct lpfc_nodelist *pnode; 6411 unsigned tgt_id = cmnd->device->id; 6412 uint64_t lun_id = cmnd->device->lun; 6413 struct lpfc_scsi_event_header scsi_event; 6414 int status; 6415 u32 logit = LOG_FCP; 6416 u32 dev_loss_tmo = vport->cfg_devloss_tmo; 6417 unsigned long flags; 6418 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); 6419 6420 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 6421 if (!rdata || !rdata->pnode) { 6422 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6423 "0799 Target Reset rdata failure: rdata x%px\n", 6424 rdata); 6425 return FAILED; 6426 } 6427 pnode = rdata->pnode; 6428 status = fc_block_scsi_eh(cmnd); 6429 if (status != 0 && status != SUCCESS) 6430 return status; 6431 6432 status = lpfc_chk_tgt_mapped(vport, cmnd); 6433 if (status == FAILED) { 6434 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6435 "0722 Target Reset rport failure: rdata x%px\n", rdata); 6436 if (pnode) { 6437 spin_lock_irqsave(&pnode->lock, flags); 6438 pnode->nlp_flag &= ~NLP_NPR_ADISC; 6439 pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 6440 spin_unlock_irqrestore(&pnode->lock, flags); 6441 } 6442 lpfc_reset_flush_io_context(vport, tgt_id, lun_id, 6443 LPFC_CTX_TGT); 6444 return FAST_IO_FAIL; 6445 } 6446 6447 scsi_event.event_type = FC_REG_SCSI_EVENT; 6448 scsi_event.subcategory = LPFC_EVENT_TGTRESET; 6449 scsi_event.lun = 0; 6450 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); 6451 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); 6452 6453 fc_host_post_vendor_event(shost, fc_get_event_number(), 6454 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 6455 6456 status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id, 6457 FCP_TARGET_RESET); 6458 if (status != SUCCESS) { 6459 logit = LOG_TRACE_EVENT; 6460 6461 /* Issue LOGO, if no LOGO is outstanding */ 6462 spin_lock_irqsave(&pnode->lock, flags); 6463 if (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO) && 6464 !pnode->logo_waitq) { 6465 pnode->logo_waitq = &waitq; 6466 pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 6467 pnode->nlp_flag |= NLP_ISSUE_LOGO; 6468 pnode->upcall_flags |= NLP_WAIT_FOR_LOGO; 6469 spin_unlock_irqrestore(&pnode->lock, flags); 6470 lpfc_unreg_rpi(vport, pnode); 6471 wait_event_timeout(waitq, 6472 (!(pnode->upcall_flags & 6473 NLP_WAIT_FOR_LOGO)), 6474 msecs_to_jiffies(dev_loss_tmo * 6475 1000)); 6476 6477 if (pnode->upcall_flags & NLP_WAIT_FOR_LOGO) { 6478 lpfc_printf_vlog(vport, KERN_ERR, logit, 6479 "0725 SCSI layer TGTRST " 6480 "failed & LOGO TMO (%d, %llu) " 6481 "return x%x\n", 6482 tgt_id, lun_id, status); 6483 spin_lock_irqsave(&pnode->lock, flags); 6484 pnode->upcall_flags &= ~NLP_WAIT_FOR_LOGO; 6485 } else { 6486 spin_lock_irqsave(&pnode->lock, flags); 6487 } 6488 pnode->logo_waitq = NULL; 6489 spin_unlock_irqrestore(&pnode->lock, flags); 6490 status = SUCCESS; 6491 6492 } else { 6493 spin_unlock_irqrestore(&pnode->lock, flags); 6494 status = FAILED; 6495 } 6496 } 6497 6498 lpfc_printf_vlog(vport, KERN_ERR, logit, 6499 "0723 SCSI layer issued Target Reset (%d, %llu) " 6500 "return x%x\n", tgt_id, lun_id, status); 6501 6502 /* 6503 * We have to clean up i/o as : they may be orphaned by the TMF; 6504 * or if the TMF failed, they may be in an 
indeterminate state. 6505 * So, continue on. 6506 * We will report success if all the i/o aborts successfully. 6507 */ 6508 if (status == SUCCESS) 6509 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, 6510 LPFC_CTX_TGT); 6511 return status; 6512 } 6513 6514 /** 6515 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point 6516 * @cmnd: Pointer to scsi_cmnd data structure. 6517 * 6518 * This routine does target reset to all targets on @cmnd->device->host. 6519 * This emulates Parallel SCSI Bus Reset Semantics. 6520 * 6521 * Return code : 6522 * 0x2003 - Error 6523 * 0x2002 - Success 6524 **/ 6525 static int 6526 lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) 6527 { 6528 struct Scsi_Host *shost = cmnd->device->host; 6529 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6530 struct lpfc_nodelist *ndlp = NULL; 6531 struct lpfc_scsi_event_header scsi_event; 6532 int match; 6533 int ret = SUCCESS, status, i; 6534 u32 logit = LOG_FCP; 6535 6536 scsi_event.event_type = FC_REG_SCSI_EVENT; 6537 scsi_event.subcategory = LPFC_EVENT_BUSRESET; 6538 scsi_event.lun = 0; 6539 memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name)); 6540 memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name)); 6541 6542 fc_host_post_vendor_event(shost, fc_get_event_number(), 6543 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 6544 6545 status = fc_block_scsi_eh(cmnd); 6546 if (status != 0 && status != SUCCESS) 6547 return status; 6548 6549 /* 6550 * Since the driver manages a single bus device, reset all 6551 * targets known to the driver. Should any target reset 6552 * fail, this routine returns failure to the midlayer. 6553 */ 6554 for (i = 0; i < LPFC_MAX_TARGET; i++) { 6555 /* Search for mapped node by target ID */ 6556 match = 0; 6557 spin_lock_irq(shost->host_lock); 6558 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 6559 6560 if (vport->phba->cfg_fcp2_no_tgt_reset && 6561 (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE)) 6562 continue; 6563 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && 6564 ndlp->nlp_sid == i && 6565 ndlp->rport && 6566 ndlp->nlp_type & NLP_FCP_TARGET) { 6567 match = 1; 6568 break; 6569 } 6570 } 6571 spin_unlock_irq(shost->host_lock); 6572 if (!match) 6573 continue; 6574 6575 status = lpfc_send_taskmgmt(vport, cmnd, 6576 i, 0, FCP_TARGET_RESET); 6577 6578 if (status != SUCCESS) { 6579 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6580 "0700 Bus Reset on target %d failed\n", 6581 i); 6582 ret = FAILED; 6583 } 6584 } 6585 /* 6586 * We have to clean up i/o as : they may be orphaned by the TMFs 6587 * above; or if any of the TMFs failed, they may be in an 6588 * indeterminate state. 6589 * We will report success if all the i/o aborts successfully. 6590 */ 6591 6592 status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST); 6593 if (status != SUCCESS) 6594 ret = FAILED; 6595 if (ret == FAILED) 6596 logit = LOG_TRACE_EVENT; 6597 6598 lpfc_printf_vlog(vport, KERN_ERR, logit, 6599 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret); 6600 return ret; 6601 } 6602 6603 /** 6604 * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt 6605 * @cmnd: Pointer to scsi_cmnd data structure. 6606 * 6607 * This routine does host reset to the adaptor port. It brings the HBA 6608 * offline, performs a board restart, and then brings the board back online. 
 * The lpfc_offline call invokes lpfc_sli_hba_down, which aborts and locally
 * rejects all outstanding SCSI commands on the host, with the errors returned
 * to the SCSI mid-layer. As this is the SCSI mid-layer's last resort of error
 * handling, the routine returns an error only if resetting the adapter fails;
 * in all other cases it returns success.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int rc, ret = SUCCESS;

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "3172 SCSI layer issued Host Reset Data:\n");

	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	rc = lpfc_sli_brdrestart(phba);
	if (rc)
		goto error;

	rc = lpfc_online(phba);
	if (rc)
		goto error;

	lpfc_unblock_mgmt_io(phba);

	return ret;
error:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "3323 Failed host reset\n");
	lpfc_unblock_mgmt_io(phba);
	return FAILED;
}

/**
 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine populates the host's globally available list of scsi buffers
 * with cmds_per_lun + 2 scsi_bufs. It also makes sure that no more scsi
 * buffers are allocated than the HBA limit conveyed to the midlayer. This
 * list of scsi buffers exists for the lifetime of the driver.
 *
 * Return codes:
 *  non-0 - Error
 *  0 - Success
 **/
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0;
	uint32_t num_to_alloc = 0;
	int num_allocated = 0;
	uint32_t sdev_cnt;
	struct lpfc_device_data *device_data;
	unsigned long flags;
	struct lpfc_name target_wwpn;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	if (phba->cfg_fof) {

		/*
		 * Check to see if the device data structure for the lun
		 * exists. If not, create one.
6685 */ 6686 6687 u64_to_wwn(rport->port_name, target_wwpn.u.wwn); 6688 spin_lock_irqsave(&phba->devicelock, flags); 6689 device_data = __lpfc_get_device_data(phba, 6690 &phba->luns, 6691 &vport->fc_portname, 6692 &target_wwpn, 6693 sdev->lun); 6694 if (!device_data) { 6695 spin_unlock_irqrestore(&phba->devicelock, flags); 6696 device_data = lpfc_create_device_data(phba, 6697 &vport->fc_portname, 6698 &target_wwpn, 6699 sdev->lun, 6700 phba->cfg_XLanePriority, 6701 true); 6702 if (!device_data) 6703 return -ENOMEM; 6704 spin_lock_irqsave(&phba->devicelock, flags); 6705 list_add_tail(&device_data->listentry, &phba->luns); 6706 } 6707 device_data->rport_data = rport->dd_data; 6708 device_data->available = true; 6709 spin_unlock_irqrestore(&phba->devicelock, flags); 6710 sdev->hostdata = device_data; 6711 } else { 6712 sdev->hostdata = rport->dd_data; 6713 } 6714 sdev_cnt = atomic_inc_return(&phba->sdev_cnt); 6715 6716 /* For SLI4, all IO buffers are pre-allocated */ 6717 if (phba->sli_rev == LPFC_SLI_REV4) 6718 return 0; 6719 6720 /* This code path is now ONLY for SLI3 adapters */ 6721 6722 /* 6723 * Populate the cmds_per_lun count scsi_bufs into this host's globally 6724 * available list of scsi buffers. Don't allocate more than the 6725 * HBA limit conveyed to the midlayer via the host structure. The 6726 * formula accounts for the lun_queue_depth + error handlers + 1 6727 * extra. This list of scsi bufs exists for the lifetime of the driver. 6728 */ 6729 total = phba->total_scsi_bufs; 6730 num_to_alloc = vport->cfg_lun_queue_depth + 2; 6731 6732 /* If allocated buffers are enough do nothing */ 6733 if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total) 6734 return 0; 6735 6736 /* Allow some exchanges to be available always to complete discovery */ 6737 if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) { 6738 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 6739 "0704 At limitation of %d preallocated " 6740 "command buffers\n", total); 6741 return 0; 6742 /* Allow some exchanges to be available always to complete discovery */ 6743 } else if (total + num_to_alloc > 6744 phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) { 6745 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 6746 "0705 Allocation request of %d " 6747 "command buffers will exceed max of %d. " 6748 "Reducing allocation request to %d.\n", 6749 num_to_alloc, phba->cfg_hba_queue_depth, 6750 (phba->cfg_hba_queue_depth - total)); 6751 num_to_alloc = phba->cfg_hba_queue_depth - total; 6752 } 6753 num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc); 6754 if (num_to_alloc != num_allocated) { 6755 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6756 "0708 Allocation request of %d " 6757 "command buffers did not succeed. " 6758 "Allocated %d buffers.\n", 6759 num_to_alloc, num_allocated); 6760 } 6761 if (num_allocated > 0) 6762 phba->total_scsi_bufs += num_allocated; 6763 return 0; 6764 } 6765 6766 /** 6767 * lpfc_slave_configure - scsi_host_template slave_configure entry point 6768 * @sdev: Pointer to scsi_device. 6769 * 6770 * This routine configures following items 6771 * - Tag command queuing support for @sdev if supported. 6772 * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set. 
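 * - Sets the initial LUN queue depth from the vport's cfg_lun_queue_depth
 *   via scsi_change_queue_depth().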
 *
 * Return codes:
 *   0 - Success
 **/
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;

	scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}

/**
 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
 * @sdev: Pointer to scsi_device.
 *
 * This routine sets the hostdata field of @sdev to NULL.
 **/
static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	unsigned long flags;
	struct lpfc_device_data *device_data = sdev->hostdata;

	atomic_dec(&phba->sdev_cnt);
	if ((phba->cfg_fof) && (device_data)) {
		spin_lock_irqsave(&phba->devicelock, flags);
		device_data->available = false;
		if (!device_data->oas_enabled)
			lpfc_delete_device_data(phba, device_data);
		spin_unlock_irqrestore(&phba->devicelock, flags);
	}
	sdev->hostdata = NULL;
	return;
}

/**
 * lpfc_create_device_data - creates and initializes device data structure for OAS
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun on target
 * @pri: Priority
 * @atomic_create: Flag to indicate if memory should be allocated using the
 *		GFP_ATOMIC flag or not.
 *
 * This routine creates a device data structure which will contain identifying
 * information for the device (host wwpn, target wwpn, lun), the state of OAS,
 * whether or not the corresponding lun is available to the system,
 * and a pointer to the rport data.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_device_data - Success
 **/
struct lpfc_device_data*
lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
			struct lpfc_name *target_wwpn, uint64_t lun,
			uint32_t pri, bool atomic_create)
{

	struct lpfc_device_data *lun_info;
	int memory_flags;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !(phba->cfg_fof))
		return NULL;

	/* Attempt to create the device data to contain lun info */

	if (atomic_create)
		memory_flags = GFP_ATOMIC;
	else
		memory_flags = GFP_KERNEL;
	lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
	if (!lun_info)
		return NULL;
	INIT_LIST_HEAD(&lun_info->listentry);
	lun_info->rport_data = NULL;
	memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
	       sizeof(struct lpfc_name));
	memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
	       sizeof(struct lpfc_name));
	lun_info->device_id.lun = lun;
	lun_info->oas_enabled = false;
	lun_info->priority = pri;
	lun_info->available = false;
	return lun_info;
}

/**
 * lpfc_delete_device_data - frees a device data structure for OAS
 * @phba: Pointer to host bus adapter structure.
 * @lun_info: Pointer to device data structure to free.
6879 * 6880 * This routine frees the previously allocated device data structure passed. 6881 * 6882 **/ 6883 void 6884 lpfc_delete_device_data(struct lpfc_hba *phba, 6885 struct lpfc_device_data *lun_info) 6886 { 6887 6888 if (unlikely(!phba) || !lun_info || 6889 !(phba->cfg_fof)) 6890 return; 6891 6892 if (!list_empty(&lun_info->listentry)) 6893 list_del(&lun_info->listentry); 6894 mempool_free(lun_info, phba->device_data_mem_pool); 6895 return; 6896 } 6897 6898 /** 6899 * __lpfc_get_device_data - returns the device data for the specified lun 6900 * @phba: Pointer to host bus adapter structure. 6901 * @list: Point to list to search. 6902 * @vport_wwpn: Pointer to vport's wwpn information 6903 * @target_wwpn: Pointer to target's wwpn information 6904 * @lun: Lun on target 6905 * 6906 * This routine searches the list passed for the specified lun's device data. 6907 * This function does not hold locks, it is the responsibility of the caller 6908 * to ensure the proper lock is held before calling the function. 6909 * 6910 * Return codes: 6911 * NULL - Error 6912 * Pointer to lpfc_device_data - Success 6913 **/ 6914 struct lpfc_device_data* 6915 __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list, 6916 struct lpfc_name *vport_wwpn, 6917 struct lpfc_name *target_wwpn, uint64_t lun) 6918 { 6919 6920 struct lpfc_device_data *lun_info; 6921 6922 if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn || 6923 !phba->cfg_fof) 6924 return NULL; 6925 6926 /* Check to see if the lun is already enabled for OAS. */ 6927 6928 list_for_each_entry(lun_info, list, listentry) { 6929 if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn, 6930 sizeof(struct lpfc_name)) == 0) && 6931 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn, 6932 sizeof(struct lpfc_name)) == 0) && 6933 (lun_info->device_id.lun == lun)) 6934 return lun_info; 6935 } 6936 6937 return NULL; 6938 } 6939 6940 /** 6941 * lpfc_find_next_oas_lun - searches for the next oas lun 6942 * @phba: Pointer to host bus adapter structure. 6943 * @vport_wwpn: Pointer to vport's wwpn information 6944 * @target_wwpn: Pointer to target's wwpn information 6945 * @starting_lun: Pointer to the lun to start searching for 6946 * @found_vport_wwpn: Pointer to the found lun's vport wwpn information 6947 * @found_target_wwpn: Pointer to the found lun's target wwpn information 6948 * @found_lun: Pointer to the found lun. 6949 * @found_lun_status: Pointer to status of the found lun. 6950 * @found_lun_pri: Pointer to priority of the found lun. 6951 * 6952 * This routine searches the luns list for the specified lun 6953 * or the first lun for the vport/target. If the vport wwpn contains 6954 * a zero value then a specific vport is not specified. In this case 6955 * any vport which contains the lun will be considered a match. If the 6956 * target wwpn contains a zero value then a specific target is not specified. 6957 * In this case any target which contains the lun will be considered a 6958 * match. If the lun is found, the lun, vport wwpn, target wwpn and lun status 6959 * are returned. The function will also return the next lun if available. 6960 * If the next lun is not found, starting_lun parameter will be set to 6961 * NO_MORE_OAS_LUN. 
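 * The routine takes and releases the phba devicelock internally, so the
 * caller must not hold it when calling this function.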
6962 * 6963 * Return codes: 6964 * non-0 - Error 6965 * 0 - Success 6966 **/ 6967 bool 6968 lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, 6969 struct lpfc_name *target_wwpn, uint64_t *starting_lun, 6970 struct lpfc_name *found_vport_wwpn, 6971 struct lpfc_name *found_target_wwpn, 6972 uint64_t *found_lun, 6973 uint32_t *found_lun_status, 6974 uint32_t *found_lun_pri) 6975 { 6976 6977 unsigned long flags; 6978 struct lpfc_device_data *lun_info; 6979 struct lpfc_device_id *device_id; 6980 uint64_t lun; 6981 bool found = false; 6982 6983 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 6984 !starting_lun || !found_vport_wwpn || 6985 !found_target_wwpn || !found_lun || !found_lun_status || 6986 (*starting_lun == NO_MORE_OAS_LUN) || 6987 !phba->cfg_fof) 6988 return false; 6989 6990 lun = *starting_lun; 6991 *found_lun = NO_MORE_OAS_LUN; 6992 *starting_lun = NO_MORE_OAS_LUN; 6993 6994 /* Search for lun or the lun closet in value */ 6995 6996 spin_lock_irqsave(&phba->devicelock, flags); 6997 list_for_each_entry(lun_info, &phba->luns, listentry) { 6998 if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) || 6999 (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn, 7000 sizeof(struct lpfc_name)) == 0)) && 7001 ((wwn_to_u64(target_wwpn->u.wwn) == 0) || 7002 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn, 7003 sizeof(struct lpfc_name)) == 0)) && 7004 (lun_info->oas_enabled)) { 7005 device_id = &lun_info->device_id; 7006 if ((!found) && 7007 ((lun == FIND_FIRST_OAS_LUN) || 7008 (device_id->lun == lun))) { 7009 *found_lun = device_id->lun; 7010 memcpy(found_vport_wwpn, 7011 &device_id->vport_wwpn, 7012 sizeof(struct lpfc_name)); 7013 memcpy(found_target_wwpn, 7014 &device_id->target_wwpn, 7015 sizeof(struct lpfc_name)); 7016 if (lun_info->available) 7017 *found_lun_status = 7018 OAS_LUN_STATUS_EXISTS; 7019 else 7020 *found_lun_status = 0; 7021 *found_lun_pri = lun_info->priority; 7022 if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT) 7023 memset(vport_wwpn, 0x0, 7024 sizeof(struct lpfc_name)); 7025 if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET) 7026 memset(target_wwpn, 0x0, 7027 sizeof(struct lpfc_name)); 7028 found = true; 7029 } else if (found) { 7030 *starting_lun = device_id->lun; 7031 memcpy(vport_wwpn, &device_id->vport_wwpn, 7032 sizeof(struct lpfc_name)); 7033 memcpy(target_wwpn, &device_id->target_wwpn, 7034 sizeof(struct lpfc_name)); 7035 break; 7036 } 7037 } 7038 } 7039 spin_unlock_irqrestore(&phba->devicelock, flags); 7040 return found; 7041 } 7042 7043 /** 7044 * lpfc_enable_oas_lun - enables a lun for OAS operations 7045 * @phba: Pointer to host bus adapter structure. 7046 * @vport_wwpn: Pointer to vport's wwpn information 7047 * @target_wwpn: Pointer to target's wwpn information 7048 * @lun: Lun 7049 * @pri: Priority 7050 * 7051 * This routine enables a lun for oas operations. The routines does so by 7052 * doing the following : 7053 * 7054 * 1) Checks to see if the device data for the lun has been created. 7055 * 2) If found, sets the OAS enabled flag if not set and returns. 7056 * 3) Otherwise, creates a device data structure. 7057 * 4) If successfully created, indicates the device data is for an OAS lun, 7058 * indicates the lun is not available and add to the list of luns. 
7059 * 7060 * Return codes: 7061 * false - Error 7062 * true - Success 7063 **/ 7064 bool 7065 lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, 7066 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri) 7067 { 7068 7069 struct lpfc_device_data *lun_info; 7070 unsigned long flags; 7071 7072 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 7073 !phba->cfg_fof) 7074 return false; 7075 7076 spin_lock_irqsave(&phba->devicelock, flags); 7077 7078 /* Check to see if the device data for the lun has been created */ 7079 lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn, 7080 target_wwpn, lun); 7081 if (lun_info) { 7082 if (!lun_info->oas_enabled) 7083 lun_info->oas_enabled = true; 7084 lun_info->priority = pri; 7085 spin_unlock_irqrestore(&phba->devicelock, flags); 7086 return true; 7087 } 7088 7089 /* Create an lun info structure and add to list of luns */ 7090 lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun, 7091 pri, true); 7092 if (lun_info) { 7093 lun_info->oas_enabled = true; 7094 lun_info->priority = pri; 7095 lun_info->available = false; 7096 list_add_tail(&lun_info->listentry, &phba->luns); 7097 spin_unlock_irqrestore(&phba->devicelock, flags); 7098 return true; 7099 } 7100 spin_unlock_irqrestore(&phba->devicelock, flags); 7101 return false; 7102 } 7103 7104 /** 7105 * lpfc_disable_oas_lun - disables a lun for OAS operations 7106 * @phba: Pointer to host bus adapter structure. 7107 * @vport_wwpn: Pointer to vport's wwpn information 7108 * @target_wwpn: Pointer to target's wwpn information 7109 * @lun: Lun 7110 * @pri: Priority 7111 * 7112 * This routine disables a lun for oas operations. The routines does so by 7113 * doing the following : 7114 * 7115 * 1) Checks to see if the device data for the lun is created. 7116 * 2) If present, clears the flag indicating this lun is for OAS. 7117 * 3) If the lun is not available by the system, the device data is 7118 * freed. 7119 * 7120 * Return codes: 7121 * false - Error 7122 * true - Success 7123 **/ 7124 bool 7125 lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, 7126 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri) 7127 { 7128 7129 struct lpfc_device_data *lun_info; 7130 unsigned long flags; 7131 7132 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 7133 !phba->cfg_fof) 7134 return false; 7135 7136 spin_lock_irqsave(&phba->devicelock, flags); 7137 7138 /* Check to see if the lun is available. 
*/ 7139 lun_info = __lpfc_get_device_data(phba, 7140 &phba->luns, vport_wwpn, 7141 target_wwpn, lun); 7142 if (lun_info) { 7143 lun_info->oas_enabled = false; 7144 lun_info->priority = pri; 7145 if (!lun_info->available) 7146 lpfc_delete_device_data(phba, lun_info); 7147 spin_unlock_irqrestore(&phba->devicelock, flags); 7148 return true; 7149 } 7150 7151 spin_unlock_irqrestore(&phba->devicelock, flags); 7152 return false; 7153 } 7154 7155 static int 7156 lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) 7157 { 7158 return SCSI_MLQUEUE_HOST_BUSY; 7159 } 7160 7161 static int 7162 lpfc_no_handler(struct scsi_cmnd *cmnd) 7163 { 7164 return FAILED; 7165 } 7166 7167 static int 7168 lpfc_no_slave(struct scsi_device *sdev) 7169 { 7170 return -ENODEV; 7171 } 7172 7173 struct scsi_host_template lpfc_template_nvme = { 7174 .module = THIS_MODULE, 7175 .name = LPFC_DRIVER_NAME, 7176 .proc_name = LPFC_DRIVER_NAME, 7177 .info = lpfc_info, 7178 .queuecommand = lpfc_no_command, 7179 .eh_abort_handler = lpfc_no_handler, 7180 .eh_device_reset_handler = lpfc_no_handler, 7181 .eh_target_reset_handler = lpfc_no_handler, 7182 .eh_bus_reset_handler = lpfc_no_handler, 7183 .eh_host_reset_handler = lpfc_no_handler, 7184 .slave_alloc = lpfc_no_slave, 7185 .slave_configure = lpfc_no_slave, 7186 .scan_finished = lpfc_scan_finished, 7187 .this_id = -1, 7188 .sg_tablesize = 1, 7189 .cmd_per_lun = 1, 7190 .shost_attrs = lpfc_hba_attrs, 7191 .max_sectors = 0xFFFFFFFF, 7192 .vendor_id = LPFC_NL_VENDOR_ID, 7193 .track_queue_depth = 0, 7194 }; 7195 7196 struct scsi_host_template lpfc_template = { 7197 .module = THIS_MODULE, 7198 .name = LPFC_DRIVER_NAME, 7199 .proc_name = LPFC_DRIVER_NAME, 7200 .info = lpfc_info, 7201 .queuecommand = lpfc_queuecommand, 7202 .eh_timed_out = fc_eh_timed_out, 7203 .eh_should_retry_cmd = fc_eh_should_retry_cmd, 7204 .eh_abort_handler = lpfc_abort_handler, 7205 .eh_device_reset_handler = lpfc_device_reset_handler, 7206 .eh_target_reset_handler = lpfc_target_reset_handler, 7207 .eh_bus_reset_handler = lpfc_bus_reset_handler, 7208 .eh_host_reset_handler = lpfc_host_reset_handler, 7209 .slave_alloc = lpfc_slave_alloc, 7210 .slave_configure = lpfc_slave_configure, 7211 .slave_destroy = lpfc_slave_destroy, 7212 .scan_finished = lpfc_scan_finished, 7213 .this_id = -1, 7214 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, 7215 .cmd_per_lun = LPFC_CMD_PER_LUN, 7216 .shost_attrs = lpfc_hba_attrs, 7217 .max_sectors = 0xFFFFFFFF, 7218 .vendor_id = LPFC_NL_VENDOR_ID, 7219 .change_queue_depth = scsi_change_queue_depth, 7220 .track_queue_depth = 1, 7221 }; 7222
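
/*
 * Usage note (illustrative only, not part of the driver's control flow):
 * lpfc_template is the fully functional SCSI host template, while
 * lpfc_template_nvme substitutes stub handlers (lpfc_no_command,
 * lpfc_no_handler, lpfc_no_slave) so a port that does not expose the FCP
 * (SCSI) protocol can still register a Scsi_Host while rejecting all SCSI
 * I/O and error-handling requests. A minimal sketch of how a template such
 * as these is handed to the SCSI midlayer is shown below; the driver's own
 * initialization code performs the real registration, and "pdev" here
 * stands for whatever struct pci_dev the caller probed (hypothetical,
 * for illustration only):
 *
 *	struct Scsi_Host *shost;
 *
 *	shost = scsi_host_alloc(&lpfc_template, sizeof(struct lpfc_vport));
 *	if (!shost)
 *		return -ENOMEM;
 *
 *	if (scsi_add_host_with_dma(shost, &pdev->dev, &pdev->dev)) {
 *		scsi_host_put(shost);
 *		return -ENODEV;
 *	}
 */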