1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 *******************************************************************/ 23 #include <linux/pci.h> 24 #include <linux/slab.h> 25 #include <linux/interrupt.h> 26 #include <linux/export.h> 27 #include <linux/delay.h> 28 #include <asm/unaligned.h> 29 #include <linux/t10-pi.h> 30 #include <linux/crc-t10dif.h> 31 #include <net/checksum.h> 32 33 #include <scsi/scsi.h> 34 #include <scsi/scsi_device.h> 35 #include <scsi/scsi_eh.h> 36 #include <scsi/scsi_host.h> 37 #include <scsi/scsi_tcq.h> 38 #include <scsi/scsi_transport_fc.h> 39 40 #include "lpfc_version.h" 41 #include "lpfc_hw4.h" 42 #include "lpfc_hw.h" 43 #include "lpfc_sli.h" 44 #include "lpfc_sli4.h" 45 #include "lpfc_nl.h" 46 #include "lpfc_disc.h" 47 #include "lpfc.h" 48 #include "lpfc_scsi.h" 49 #include "lpfc_logmsg.h" 50 #include "lpfc_crtn.h" 51 #include "lpfc_vport.h" 52 53 #define LPFC_RESET_WAIT 2 54 #define LPFC_ABORT_WAIT 2 55 56 static char *dif_op_str[] = { 57 "PROT_NORMAL", 58 "PROT_READ_INSERT", 59 "PROT_WRITE_STRIP", 60 "PROT_READ_STRIP", 61 "PROT_WRITE_INSERT", 62 "PROT_READ_PASS", 63 "PROT_WRITE_PASS", 64 }; 65 66 struct scsi_dif_tuple { 67 __be16 guard_tag; /* Checksum */ 68 __be16 app_tag; /* Opaque storage */ 69 __be32 ref_tag; /* Target LBA or indirect LBA */ 70 }; 71 72 static struct lpfc_rport_data * 73 lpfc_rport_data_from_scsi_device(struct scsi_device *sdev) 74 { 75 struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata; 76 77 if (vport->phba->cfg_fof) 78 return ((struct lpfc_device_data *)sdev->hostdata)->rport_data; 79 else 80 return (struct lpfc_rport_data *)sdev->hostdata; 81 } 82 83 static void 84 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb); 85 static void 86 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb); 87 static int 88 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc); 89 90 static inline unsigned 91 lpfc_cmd_blksize(struct scsi_cmnd *sc) 92 { 93 return sc->device->sector_size; 94 } 95 96 #define LPFC_CHECK_PROTECT_GUARD 1 97 #define LPFC_CHECK_PROTECT_REF 2 98 static inline unsigned 99 lpfc_cmd_protect(struct scsi_cmnd *sc, int flag) 100 { 101 return 1; 102 } 103 104 static inline unsigned 105 lpfc_cmd_guard_csum(struct scsi_cmnd *sc) 106 { 107 if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF) 108 return 0; 109 if 
	    (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
		return 1;
	return 0;
}

/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called from the lpfc_prep_task_mgmt_cmd function to
 * set the last bit in the response sge entry.
 **/
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
			   struct lpfc_io_buf *lpfc_cmd)
{
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;

	if (sgl) {
		sgl += 1;
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}
}

#define LPFC_INVALID_REFTAG ((u32)-1)

/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @vport: The virtual port on which this call is executing.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called when there is a command completion and this
 * function updates the statistical data for the command completion.
 **/
static void
lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	unsigned long latency;
	int i;

	if (!vport->stat_data_enabled ||
	    vport->stat_data_blocked ||
	    (cmd->result))
		return;

	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
	rdata = lpfc_cmd->rdata;
	pnode = rdata->pnode;

	spin_lock_irqsave(shost->host_lock, flags);
	if (!pnode ||
	    !pnode->lat_data ||
	    (phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		i = (latency + phba->bucket_step - 1 - phba->bucket_base) /
			phba->bucket_step;
		/* check array subscript bounds */
		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT - 1; i++)
			if (latency <= (phba->bucket_base +
					((1 << i) * phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is a resource error in the driver or
 * firmware. It posts a WORKER_RAMP_DOWN_QUEUE event for @phba, at most one
 * event per QUEUE_RAMP_DOWN_INTERVAL, and wakes up the worker thread of
 * @phba to process the WORKER_RAMP_DOWN_QUEUE event.
 *
 * This routine should be called with no lock held.
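 *
 * Only the first caller inside a QUEUE_RAMP_DOWN_INTERVAL window updates
 * last_ramp_down_time and posts the event; later callers observe
 * time_after(expires, jiffies) in the code below and return without
 * waking the worker.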
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;
	unsigned long expires;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
	if (time_after(expires, jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called by the worker thread to process the
 * WORKER_RAMP_DOWN_QUEUE event. It reduces the queue depth of every SCSI
 * device on each vport associated with @phba.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	/*
	 * The error and success command counters are global per
	 * driver instance. If another handler has already
	 * operated on this error event, just exit.
	 */
	if (num_rsrc_err == 0)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				/*
				 * Reduce the depth by the fraction of
				 * commands that hit a resource error, e.g.
				 * with num_rsrc_err = 10, num_cmd_success =
				 * 30 and a current depth of 32, the depth
				 * drops by 32 * 10 / 40 = 8, to 24.
				 */
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
				scsi_change_queue_depth(sdev, new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to the blocked
 * state by invoking the fc_remote_port_delete() routine. It is invoked by
 * the EEH handling path when a device's PCI slot has been permanently
 * disabled.
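 *
 * Deleting the remote ports is expected to make the FC transport block the
 * corresponding SCSI targets, so new commands are held rather than sent to
 * the dead slot; if the ports are not restored within dev_loss_tmo they are
 * removed and outstanding I/O is failed back.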
293 **/ 294 void 295 lpfc_scsi_dev_block(struct lpfc_hba *phba) 296 { 297 struct lpfc_vport **vports; 298 struct Scsi_Host *shost; 299 struct scsi_device *sdev; 300 struct fc_rport *rport; 301 int i; 302 303 vports = lpfc_create_vport_work_array(phba); 304 if (vports != NULL) 305 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 306 shost = lpfc_shost_from_vport(vports[i]); 307 shost_for_each_device(sdev, shost) { 308 rport = starget_to_rport(scsi_target(sdev)); 309 fc_remote_port_delete(rport); 310 } 311 } 312 lpfc_destroy_vport_work_array(phba, vports); 313 } 314 315 /** 316 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec 317 * @vport: The virtual port for which this call being executed. 318 * @num_to_alloc: The requested number of buffers to allocate. 319 * 320 * This routine allocates a scsi buffer for device with SLI-3 interface spec, 321 * the scsi buffer contains all the necessary information needed to initiate 322 * a SCSI I/O. The non-DMAable buffer region contains information to build 323 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP, 324 * and the initial BPL. In addition to allocating memory, the FCP CMND and 325 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB. 326 * 327 * Return codes: 328 * int - number of scsi buffers that were allocated. 329 * 0 = failure, less than num_to_alloc is a partial failure. 330 **/ 331 static int 332 lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) 333 { 334 struct lpfc_hba *phba = vport->phba; 335 struct lpfc_io_buf *psb; 336 struct ulp_bde64 *bpl; 337 IOCB_t *iocb; 338 dma_addr_t pdma_phys_fcp_cmd; 339 dma_addr_t pdma_phys_fcp_rsp; 340 dma_addr_t pdma_phys_sgl; 341 uint16_t iotag; 342 int bcnt, bpl_size; 343 344 bpl_size = phba->cfg_sg_dma_buf_size - 345 (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); 346 347 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 348 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n", 349 num_to_alloc, phba->cfg_sg_dma_buf_size, 350 (int)sizeof(struct fcp_cmnd), 351 (int)sizeof(struct fcp_rsp), bpl_size); 352 353 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { 354 psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL); 355 if (!psb) 356 break; 357 358 /* 359 * Get memory from the pci pool to map the virt space to pci 360 * bus space for an I/O. The DMA buffer includes space for the 361 * struct fcp_cmnd, struct fcp_rsp and the number of bde's 362 * necessary to support the sg_tablesize. 363 */ 364 psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool, 365 GFP_KERNEL, &psb->dma_handle); 366 if (!psb->data) { 367 kfree(psb); 368 break; 369 } 370 371 372 /* Allocate iotag for psb->cur_iocbq. */ 373 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); 374 if (iotag == 0) { 375 dma_pool_free(phba->lpfc_sg_dma_buf_pool, 376 psb->data, psb->dma_handle); 377 kfree(psb); 378 break; 379 } 380 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; 381 382 psb->fcp_cmnd = psb->data; 383 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd); 384 psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) + 385 sizeof(struct fcp_rsp); 386 387 /* Initialize local short-hand pointers. */ 388 bpl = (struct ulp_bde64 *)psb->dma_sgl; 389 pdma_phys_fcp_cmd = psb->dma_handle; 390 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd); 391 pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) + 392 sizeof(struct fcp_rsp); 393 394 /* 395 * The first two bdes are the FCP_CMD and FCP_RSP. The balance 396 * are sg list bdes. 
Initialize the first two and leave the 397 * rest for queuecommand. 398 */ 399 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd)); 400 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd)); 401 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd); 402 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64; 403 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w); 404 405 /* Setup the physical region for the FCP RSP */ 406 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp)); 407 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp)); 408 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp); 409 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64; 410 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w); 411 412 /* 413 * Since the IOCB for the FCP I/O is built into this 414 * lpfc_scsi_buf, initialize it with all known data now. 415 */ 416 iocb = &psb->cur_iocbq.iocb; 417 iocb->un.fcpi64.bdl.ulpIoTag32 = 0; 418 if ((phba->sli_rev == 3) && 419 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { 420 /* fill in immediate fcp command BDE */ 421 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED; 422 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); 423 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t, 424 unsli3.fcp_ext.icd); 425 iocb->un.fcpi64.bdl.addrHigh = 0; 426 iocb->ulpBdeCount = 0; 427 iocb->ulpLe = 0; 428 /* fill in response BDE */ 429 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = 430 BUFF_TYPE_BDE_64; 431 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize = 432 sizeof(struct fcp_rsp); 433 iocb->unsli3.fcp_ext.rbde.addrLow = 434 putPaddrLow(pdma_phys_fcp_rsp); 435 iocb->unsli3.fcp_ext.rbde.addrHigh = 436 putPaddrHigh(pdma_phys_fcp_rsp); 437 } else { 438 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64; 439 iocb->un.fcpi64.bdl.bdeSize = 440 (2 * sizeof(struct ulp_bde64)); 441 iocb->un.fcpi64.bdl.addrLow = 442 putPaddrLow(pdma_phys_sgl); 443 iocb->un.fcpi64.bdl.addrHigh = 444 putPaddrHigh(pdma_phys_sgl); 445 iocb->ulpBdeCount = 1; 446 iocb->ulpLe = 1; 447 } 448 iocb->ulpClass = CLASS3; 449 psb->status = IOSTAT_SUCCESS; 450 /* Put it back into the SCSI buffer list */ 451 psb->cur_iocbq.context1 = psb; 452 spin_lock_init(&psb->buf_lock); 453 lpfc_release_scsi_buf_s3(phba, psb); 454 455 } 456 457 return bcnt; 458 } 459 460 /** 461 * lpfc_sli4_vport_delete_fcp_xri_aborted -Remove all ndlp references for vport 462 * @vport: pointer to lpfc vport data structure. 463 * 464 * This routine is invoked by the vport cleanup for deletions and the cleanup 465 * for an ndlp on removal. 466 **/ 467 void 468 lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport) 469 { 470 struct lpfc_hba *phba = vport->phba; 471 struct lpfc_io_buf *psb, *next_psb; 472 struct lpfc_sli4_hdw_queue *qp; 473 unsigned long iflag = 0; 474 int idx; 475 476 if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 477 return; 478 479 spin_lock_irqsave(&phba->hbalock, iflag); 480 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { 481 qp = &phba->sli4_hba.hdwq[idx]; 482 483 spin_lock(&qp->abts_io_buf_list_lock); 484 list_for_each_entry_safe(psb, next_psb, 485 &qp->lpfc_abts_io_buf_list, list) { 486 if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) 487 continue; 488 489 if (psb->rdata && psb->rdata->pnode && 490 psb->rdata->pnode->vport == vport) 491 psb->rdata = NULL; 492 } 493 spin_unlock(&qp->abts_io_buf_list_lock); 494 } 495 spin_unlock_irqrestore(&phba->hbalock, iflag); 496 } 497 498 /** 499 * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort 500 * @phba: pointer to lpfc hba data structure. 501 * @axri: pointer to the fcp xri abort wcqe structure. 
502 * @idx: index into hdwq 503 * 504 * This routine is invoked by the worker thread to process a SLI4 fast-path 505 * FCP or NVME aborted xri. 506 **/ 507 void 508 lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba, 509 struct sli4_wcqe_xri_aborted *axri, int idx) 510 { 511 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 512 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); 513 struct lpfc_io_buf *psb, *next_psb; 514 struct lpfc_sli4_hdw_queue *qp; 515 unsigned long iflag = 0; 516 struct lpfc_iocbq *iocbq; 517 int i; 518 struct lpfc_nodelist *ndlp; 519 int rrq_empty = 0; 520 struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring; 521 522 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) 523 return; 524 525 qp = &phba->sli4_hba.hdwq[idx]; 526 spin_lock_irqsave(&phba->hbalock, iflag); 527 spin_lock(&qp->abts_io_buf_list_lock); 528 list_for_each_entry_safe(psb, next_psb, 529 &qp->lpfc_abts_io_buf_list, list) { 530 if (psb->cur_iocbq.sli4_xritag == xri) { 531 list_del_init(&psb->list); 532 psb->flags &= ~LPFC_SBUF_XBUSY; 533 psb->status = IOSTAT_SUCCESS; 534 if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) { 535 qp->abts_nvme_io_bufs--; 536 spin_unlock(&qp->abts_io_buf_list_lock); 537 spin_unlock_irqrestore(&phba->hbalock, iflag); 538 lpfc_sli4_nvme_xri_aborted(phba, axri, psb); 539 return; 540 } 541 qp->abts_scsi_io_bufs--; 542 spin_unlock(&qp->abts_io_buf_list_lock); 543 544 if (psb->rdata && psb->rdata->pnode) 545 ndlp = psb->rdata->pnode; 546 else 547 ndlp = NULL; 548 549 rrq_empty = list_empty(&phba->active_rrq_list); 550 spin_unlock_irqrestore(&phba->hbalock, iflag); 551 if (ndlp) { 552 lpfc_set_rrq_active(phba, ndlp, 553 psb->cur_iocbq.sli4_lxritag, rxid, 1); 554 lpfc_sli4_abts_err_handler(phba, ndlp, axri); 555 } 556 lpfc_release_scsi_buf_s4(phba, psb); 557 if (rrq_empty) 558 lpfc_worker_wake_up(phba); 559 return; 560 } 561 } 562 spin_unlock(&qp->abts_io_buf_list_lock); 563 for (i = 1; i <= phba->sli.last_iotag; i++) { 564 iocbq = phba->sli.iocbq_lookup[i]; 565 566 if (!(iocbq->iocb_flag & LPFC_IO_FCP) || 567 (iocbq->iocb_flag & LPFC_IO_LIBDFC)) 568 continue; 569 if (iocbq->sli4_xritag != xri) 570 continue; 571 psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); 572 psb->flags &= ~LPFC_SBUF_XBUSY; 573 spin_unlock_irqrestore(&phba->hbalock, iflag); 574 if (!list_empty(&pring->txq)) 575 lpfc_worker_wake_up(phba); 576 return; 577 578 } 579 spin_unlock_irqrestore(&phba->hbalock, iflag); 580 } 581 582 /** 583 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA 584 * @phba: The HBA for which this call is being executed. 585 * @ndlp: pointer to a node-list data structure. 586 * @cmnd: Pointer to scsi_cmnd data structure. 587 * 588 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list 589 * and returns to caller. 
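 *
 * Free buffers are kept on two lists, lpfc_scsi_buf_list_get and
 * lpfc_scsi_buf_list_put, so that allocation and release normally take
 * different locks; only when the get list is empty is the put list spliced
 * onto it under the put-list lock, as done below.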
590 * 591 * Return codes: 592 * NULL - Error 593 * Pointer to lpfc_scsi_buf - Success 594 **/ 595 static struct lpfc_io_buf * 596 lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 597 struct scsi_cmnd *cmnd) 598 { 599 struct lpfc_io_buf *lpfc_cmd = NULL; 600 struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get; 601 unsigned long iflag = 0; 602 603 spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag); 604 list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf, 605 list); 606 if (!lpfc_cmd) { 607 spin_lock(&phba->scsi_buf_list_put_lock); 608 list_splice(&phba->lpfc_scsi_buf_list_put, 609 &phba->lpfc_scsi_buf_list_get); 610 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 611 list_remove_head(scsi_buf_list_get, lpfc_cmd, 612 struct lpfc_io_buf, list); 613 spin_unlock(&phba->scsi_buf_list_put_lock); 614 } 615 spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag); 616 617 if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) { 618 atomic_inc(&ndlp->cmd_pending); 619 lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH; 620 } 621 return lpfc_cmd; 622 } 623 /** 624 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA 625 * @phba: The HBA for which this call is being executed. 626 * @ndlp: pointer to a node-list data structure. 627 * @cmnd: Pointer to scsi_cmnd data structure. 628 * 629 * This routine removes a scsi buffer from head of @hdwq io_buf_list 630 * and returns to caller. 631 * 632 * Return codes: 633 * NULL - Error 634 * Pointer to lpfc_scsi_buf - Success 635 **/ 636 static struct lpfc_io_buf * 637 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 638 struct scsi_cmnd *cmnd) 639 { 640 struct lpfc_io_buf *lpfc_cmd; 641 struct lpfc_sli4_hdw_queue *qp; 642 struct sli4_sge *sgl; 643 dma_addr_t pdma_phys_fcp_rsp; 644 dma_addr_t pdma_phys_fcp_cmd; 645 uint32_t cpu, idx; 646 int tag; 647 struct fcp_cmd_rsp_buf *tmp = NULL; 648 649 cpu = raw_smp_processor_id(); 650 if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) { 651 tag = blk_mq_unique_tag(cmnd->request); 652 idx = blk_mq_unique_tag_to_hwq(tag); 653 } else { 654 idx = phba->sli4_hba.cpu_map[cpu].hdwq; 655 } 656 657 lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx, 658 !phba->cfg_xri_rebalancing); 659 if (!lpfc_cmd) { 660 qp = &phba->sli4_hba.hdwq[idx]; 661 qp->empty_io_bufs++; 662 return NULL; 663 } 664 665 /* Setup key fields in buffer that may have been changed 666 * if other protocols used this buffer. 667 */ 668 lpfc_cmd->cur_iocbq.iocb_flag = LPFC_IO_FCP; 669 lpfc_cmd->prot_seg_cnt = 0; 670 lpfc_cmd->seg_cnt = 0; 671 lpfc_cmd->timeout = 0; 672 lpfc_cmd->flags = 0; 673 lpfc_cmd->start_time = jiffies; 674 lpfc_cmd->waitq = NULL; 675 lpfc_cmd->cpu = cpu; 676 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 677 lpfc_cmd->prot_data_type = 0; 678 #endif 679 tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd); 680 if (!tmp) { 681 lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq); 682 return NULL; 683 } 684 685 lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd; 686 lpfc_cmd->fcp_rsp = tmp->fcp_rsp; 687 688 /* 689 * The first two SGEs are the FCP_CMD and FCP_RSP. 690 * The balance are sg list bdes. Initialize the 691 * first two and leave the rest for queuecommand. 
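	 * The FCP_CMD SGE is left marked not-last and the FCP_RSP SGE is
	 * marked last, so the two-entry SGL built below is already complete
	 * for a command that carries no data; the queuecommand path is
	 * expected to append the data SGEs (and clear the last bit on the
	 * FCP_RSP SGE) when there is a payload.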
692 */ 693 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; 694 pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle; 695 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd)); 696 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd)); 697 sgl->word2 = le32_to_cpu(sgl->word2); 698 bf_set(lpfc_sli4_sge_last, sgl, 0); 699 sgl->word2 = cpu_to_le32(sgl->word2); 700 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd)); 701 sgl++; 702 703 /* Setup the physical region for the FCP RSP */ 704 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd); 705 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp)); 706 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp)); 707 sgl->word2 = le32_to_cpu(sgl->word2); 708 bf_set(lpfc_sli4_sge_last, sgl, 1); 709 sgl->word2 = cpu_to_le32(sgl->word2); 710 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp)); 711 712 if (lpfc_ndlp_check_qdepth(phba, ndlp)) { 713 atomic_inc(&ndlp->cmd_pending); 714 lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH; 715 } 716 return lpfc_cmd; 717 } 718 /** 719 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA 720 * @phba: The HBA for which this call is being executed. 721 * @ndlp: pointer to a node-list data structure. 722 * @cmnd: Pointer to scsi_cmnd data structure. 723 * 724 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list 725 * and returns to caller. 726 * 727 * Return codes: 728 * NULL - Error 729 * Pointer to lpfc_scsi_buf - Success 730 **/ 731 static struct lpfc_io_buf* 732 lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 733 struct scsi_cmnd *cmnd) 734 { 735 return phba->lpfc_get_scsi_buf(phba, ndlp, cmnd); 736 } 737 738 /** 739 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list 740 * @phba: The Hba for which this call is being executed. 741 * @psb: The scsi buffer which is being released. 742 * 743 * This routine releases @psb scsi buffer by adding it to tail of @phba 744 * lpfc_scsi_buf_list list. 745 **/ 746 static void 747 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb) 748 { 749 unsigned long iflag = 0; 750 751 psb->seg_cnt = 0; 752 psb->prot_seg_cnt = 0; 753 754 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); 755 psb->pCmd = NULL; 756 psb->cur_iocbq.iocb_flag = LPFC_IO_FCP; 757 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put); 758 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); 759 } 760 761 /** 762 * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list. 763 * @phba: The Hba for which this call is being executed. 764 * @psb: The scsi buffer which is being released. 765 * 766 * This routine releases @psb scsi buffer by adding it to tail of @hdwq 767 * io_buf_list list. For SLI4 XRI's are tied to the scsi buffer 768 * and cannot be reused for at least RA_TOV amount of time if it was 769 * aborted. 
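 *
 * If LPFC_SBUF_XBUSY is still set, the buffer is parked on the hdwq
 * lpfc_abts_io_buf_list instead of being freed; lpfc_sli4_io_xri_aborted()
 * returns it to the free pool once the port reports the XRI abort
 * completion.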
770 **/ 771 static void 772 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb) 773 { 774 struct lpfc_sli4_hdw_queue *qp; 775 unsigned long iflag = 0; 776 777 psb->seg_cnt = 0; 778 psb->prot_seg_cnt = 0; 779 780 qp = psb->hdwq; 781 if (psb->flags & LPFC_SBUF_XBUSY) { 782 spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag); 783 psb->pCmd = NULL; 784 list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list); 785 qp->abts_scsi_io_bufs++; 786 spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag); 787 } else { 788 lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp); 789 } 790 } 791 792 /** 793 * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list. 794 * @phba: The Hba for which this call is being executed. 795 * @psb: The scsi buffer which is being released. 796 * 797 * This routine releases @psb scsi buffer by adding it to tail of @phba 798 * lpfc_scsi_buf_list list. 799 **/ 800 static void 801 lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb) 802 { 803 if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp) 804 atomic_dec(&psb->ndlp->cmd_pending); 805 806 psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH; 807 phba->lpfc_release_scsi_buf(phba, psb); 808 } 809 810 /** 811 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB 812 * @data: A pointer to the immediate command data portion of the IOCB. 813 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer. 814 * 815 * The routine copies the entire FCP command from @fcp_cmnd to @data while 816 * byte swapping the data to big endian format for transmission on the wire. 817 **/ 818 static void 819 lpfc_fcpcmd_to_iocb(u8 *data, struct fcp_cmnd *fcp_cmnd) 820 { 821 int i, j; 822 823 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd); 824 i += sizeof(uint32_t), j++) { 825 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]); 826 } 827 } 828 829 /** 830 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec 831 * @phba: The Hba for which this call is being executed. 832 * @lpfc_cmd: The scsi buffer which is going to be mapped. 833 * 834 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd 835 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans 836 * through sg elements and format the bde. This routine also initializes all 837 * IOCB fields which are dependent on scsi command request buffer. 838 * 839 * Return codes: 840 * 1 - Error 841 * 0 - Success 842 **/ 843 static int 844 lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) 845 { 846 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 847 struct scatterlist *sgel = NULL; 848 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 849 struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl; 850 struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq; 851 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 852 struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde; 853 dma_addr_t physaddr; 854 uint32_t num_bde = 0; 855 int nseg, datadir = scsi_cmnd->sc_data_direction; 856 857 /* 858 * There are three possibilities here - use scatter-gather segment, use 859 * the single mapping, or neither. Start the lpfc command prep by 860 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first 861 * data bde entry. 862 */ 863 bpl += 2; 864 if (scsi_sg_count(scsi_cmnd)) { 865 /* 866 * The driver stores the segment count returned from pci_map_sg 867 * because this a count of dma-mappings used to map the use_sg 868 * pages. 
They are not guaranteed to be the same for those 869 * architectures that implement an IOMMU. 870 */ 871 872 nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd), 873 scsi_sg_count(scsi_cmnd), datadir); 874 if (unlikely(!nseg)) 875 return 1; 876 877 lpfc_cmd->seg_cnt = nseg; 878 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 879 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 880 "9064 BLKGRD: %s: Too many sg segments" 881 " from dma_map_sg. Config %d, seg_cnt" 882 " %d\n", __func__, phba->cfg_sg_seg_cnt, 883 lpfc_cmd->seg_cnt); 884 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); 885 lpfc_cmd->seg_cnt = 0; 886 scsi_dma_unmap(scsi_cmnd); 887 return 2; 888 } 889 890 /* 891 * The driver established a maximum scatter-gather segment count 892 * during probe that limits the number of sg elements in any 893 * single scsi command. Just run through the seg_cnt and format 894 * the bde's. 895 * When using SLI-3 the driver will try to fit all the BDEs into 896 * the IOCB. If it can't then the BDEs get added to a BPL as it 897 * does for SLI-2 mode. 898 */ 899 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) { 900 physaddr = sg_dma_address(sgel); 901 if (phba->sli_rev == 3 && 902 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 903 !(iocbq->iocb_flag & DSS_SECURITY_OP) && 904 nseg <= LPFC_EXT_DATA_BDE_COUNT) { 905 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 906 data_bde->tus.f.bdeSize = sg_dma_len(sgel); 907 data_bde->addrLow = putPaddrLow(physaddr); 908 data_bde->addrHigh = putPaddrHigh(physaddr); 909 data_bde++; 910 } else { 911 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 912 bpl->tus.f.bdeSize = sg_dma_len(sgel); 913 bpl->tus.w = le32_to_cpu(bpl->tus.w); 914 bpl->addrLow = 915 le32_to_cpu(putPaddrLow(physaddr)); 916 bpl->addrHigh = 917 le32_to_cpu(putPaddrHigh(physaddr)); 918 bpl++; 919 } 920 } 921 } 922 923 /* 924 * Finish initializing those IOCB fields that are dependent on the 925 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is 926 * explicitly reinitialized and for SLI-3 the extended bde count is 927 * explicitly reinitialized since all iocb memory resources are reused. 928 */ 929 if (phba->sli_rev == 3 && 930 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 931 !(iocbq->iocb_flag & DSS_SECURITY_OP)) { 932 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) { 933 /* 934 * The extended IOCB format can only fit 3 BDE or a BPL. 935 * This I/O has more than 3 BDE so the 1st data bde will 936 * be a BPL that is filled in here. 
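			 * The BPL itself already lives in the buffer's DMA
			 * area, so its bus address is dma_handle plus the
			 * FCP_CMND, FCP_RSP and the two BDEs that describe
			 * them; the descriptor built below therefore covers
			 * only the data BDEs.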
937 */ 938 physaddr = lpfc_cmd->dma_handle; 939 data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64; 940 data_bde->tus.f.bdeSize = (num_bde * 941 sizeof(struct ulp_bde64)); 942 physaddr += (sizeof(struct fcp_cmnd) + 943 sizeof(struct fcp_rsp) + 944 (2 * sizeof(struct ulp_bde64))); 945 data_bde->addrHigh = putPaddrHigh(physaddr); 946 data_bde->addrLow = putPaddrLow(physaddr); 947 /* ebde count includes the response bde and data bpl */ 948 iocb_cmd->unsli3.fcp_ext.ebde_count = 2; 949 } else { 950 /* ebde count includes the response bde and data bdes */ 951 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1); 952 } 953 } else { 954 iocb_cmd->un.fcpi64.bdl.bdeSize = 955 ((num_bde + 2) * sizeof(struct ulp_bde64)); 956 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1); 957 } 958 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); 959 960 /* 961 * Due to difference in data length between DIF/non-DIF paths, 962 * we need to set word 4 of IOCB here 963 */ 964 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd); 965 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd); 966 return 0; 967 } 968 969 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 970 971 /* Return BG_ERR_INIT if error injection is detected by Initiator */ 972 #define BG_ERR_INIT 0x1 973 /* Return BG_ERR_TGT if error injection is detected by Target */ 974 #define BG_ERR_TGT 0x2 975 /* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */ 976 #define BG_ERR_SWAP 0x10 977 /* 978 * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for 979 * error injection 980 */ 981 #define BG_ERR_CHECK 0x20 982 983 /** 984 * lpfc_bg_err_inject - Determine if we should inject an error 985 * @phba: The Hba for which this call is being executed. 986 * @sc: The SCSI command to examine 987 * @reftag: (out) BlockGuard reference tag for transmitted data 988 * @apptag: (out) BlockGuard application tag for transmitted data 989 * @new_guard: (in) Value to replace CRC with if needed 990 * 991 * Returns BG_ERR_* bit mask or 0 if request ignored 992 **/ 993 static int 994 lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, 995 uint32_t *reftag, uint16_t *apptag, uint32_t new_guard) 996 { 997 struct scatterlist *sgpe; /* s/g prot entry */ 998 struct lpfc_io_buf *lpfc_cmd = NULL; 999 struct scsi_dif_tuple *src = NULL; 1000 struct lpfc_nodelist *ndlp; 1001 struct lpfc_rport_data *rdata; 1002 uint32_t op = scsi_get_prot_op(sc); 1003 uint32_t blksize; 1004 uint32_t numblks; 1005 u32 lba; 1006 int rc = 0; 1007 int blockoff = 0; 1008 1009 if (op == SCSI_PROT_NORMAL) 1010 return 0; 1011 1012 sgpe = scsi_prot_sglist(sc); 1013 lba = t10_pi_ref_tag(sc->request); 1014 if (lba == LPFC_INVALID_REFTAG) 1015 return 0; 1016 1017 /* First check if we need to match the LBA */ 1018 if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) { 1019 blksize = lpfc_cmd_blksize(sc); 1020 numblks = (scsi_bufflen(sc) + blksize - 1) / blksize; 1021 1022 /* Make sure we have the right LBA if one is specified */ 1023 if (phba->lpfc_injerr_lba < (u64)lba || 1024 (phba->lpfc_injerr_lba >= (u64)(lba + numblks))) 1025 return 0; 1026 if (sgpe) { 1027 blockoff = phba->lpfc_injerr_lba - (u64)lba; 1028 numblks = sg_dma_len(sgpe) / 1029 sizeof(struct scsi_dif_tuple); 1030 if (numblks < blockoff) 1031 blockoff = numblks; 1032 } 1033 } 1034 1035 /* Next check if we need to match the remote NPortID or WWPN */ 1036 rdata = lpfc_rport_data_from_scsi_device(sc->device); 1037 if (rdata && rdata->pnode) { 1038 ndlp = rdata->pnode; 1039 1040 /* Make sure we have the right 
NPortID if one is specified */ 1041 if (phba->lpfc_injerr_nportid && 1042 (phba->lpfc_injerr_nportid != ndlp->nlp_DID)) 1043 return 0; 1044 1045 /* 1046 * Make sure we have the right WWPN if one is specified. 1047 * wwn[0] should be a non-zero NAA in a good WWPN. 1048 */ 1049 if (phba->lpfc_injerr_wwpn.u.wwn[0] && 1050 (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn, 1051 sizeof(struct lpfc_name)) != 0)) 1052 return 0; 1053 } 1054 1055 /* Setup a ptr to the protection data if the SCSI host provides it */ 1056 if (sgpe) { 1057 src = (struct scsi_dif_tuple *)sg_virt(sgpe); 1058 src += blockoff; 1059 lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble; 1060 } 1061 1062 /* Should we change the Reference Tag */ 1063 if (reftag) { 1064 if (phba->lpfc_injerr_wref_cnt) { 1065 switch (op) { 1066 case SCSI_PROT_WRITE_PASS: 1067 if (src) { 1068 /* 1069 * For WRITE_PASS, force the error 1070 * to be sent on the wire. It should 1071 * be detected by the Target. 1072 * If blockoff != 0 error will be 1073 * inserted in middle of the IO. 1074 */ 1075 1076 lpfc_printf_log(phba, KERN_ERR, 1077 LOG_TRACE_EVENT, 1078 "9076 BLKGRD: Injecting reftag error: " 1079 "write lba x%lx + x%x oldrefTag x%x\n", 1080 (unsigned long)lba, blockoff, 1081 be32_to_cpu(src->ref_tag)); 1082 1083 /* 1084 * Save the old ref_tag so we can 1085 * restore it on completion. 1086 */ 1087 if (lpfc_cmd) { 1088 lpfc_cmd->prot_data_type = 1089 LPFC_INJERR_REFTAG; 1090 lpfc_cmd->prot_data_segment = 1091 src; 1092 lpfc_cmd->prot_data = 1093 src->ref_tag; 1094 } 1095 src->ref_tag = cpu_to_be32(0xDEADBEEF); 1096 phba->lpfc_injerr_wref_cnt--; 1097 if (phba->lpfc_injerr_wref_cnt == 0) { 1098 phba->lpfc_injerr_nportid = 0; 1099 phba->lpfc_injerr_lba = 1100 LPFC_INJERR_LBA_OFF; 1101 memset(&phba->lpfc_injerr_wwpn, 1102 0, sizeof(struct lpfc_name)); 1103 } 1104 rc = BG_ERR_TGT | BG_ERR_CHECK; 1105 1106 break; 1107 } 1108 fallthrough; 1109 case SCSI_PROT_WRITE_INSERT: 1110 /* 1111 * For WRITE_INSERT, force the error 1112 * to be sent on the wire. It should be 1113 * detected by the Target. 1114 */ 1115 /* DEADBEEF will be the reftag on the wire */ 1116 *reftag = 0xDEADBEEF; 1117 phba->lpfc_injerr_wref_cnt--; 1118 if (phba->lpfc_injerr_wref_cnt == 0) { 1119 phba->lpfc_injerr_nportid = 0; 1120 phba->lpfc_injerr_lba = 1121 LPFC_INJERR_LBA_OFF; 1122 memset(&phba->lpfc_injerr_wwpn, 1123 0, sizeof(struct lpfc_name)); 1124 } 1125 rc = BG_ERR_TGT | BG_ERR_CHECK; 1126 1127 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1128 "9078 BLKGRD: Injecting reftag error: " 1129 "write lba x%lx\n", (unsigned long)lba); 1130 break; 1131 case SCSI_PROT_WRITE_STRIP: 1132 /* 1133 * For WRITE_STRIP and WRITE_PASS, 1134 * force the error on data 1135 * being copied from SLI-Host to SLI-Port. 1136 */ 1137 *reftag = 0xDEADBEEF; 1138 phba->lpfc_injerr_wref_cnt--; 1139 if (phba->lpfc_injerr_wref_cnt == 0) { 1140 phba->lpfc_injerr_nportid = 0; 1141 phba->lpfc_injerr_lba = 1142 LPFC_INJERR_LBA_OFF; 1143 memset(&phba->lpfc_injerr_wwpn, 1144 0, sizeof(struct lpfc_name)); 1145 } 1146 rc = BG_ERR_INIT; 1147 1148 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1149 "9077 BLKGRD: Injecting reftag error: " 1150 "write lba x%lx\n", (unsigned long)lba); 1151 break; 1152 } 1153 } 1154 if (phba->lpfc_injerr_rref_cnt) { 1155 switch (op) { 1156 case SCSI_PROT_READ_INSERT: 1157 case SCSI_PROT_READ_STRIP: 1158 case SCSI_PROT_READ_PASS: 1159 /* 1160 * For READ_STRIP and READ_PASS, force the 1161 * error on data being read off the wire. It 1162 * should force an IO error to the driver. 
1163 */ 1164 *reftag = 0xDEADBEEF; 1165 phba->lpfc_injerr_rref_cnt--; 1166 if (phba->lpfc_injerr_rref_cnt == 0) { 1167 phba->lpfc_injerr_nportid = 0; 1168 phba->lpfc_injerr_lba = 1169 LPFC_INJERR_LBA_OFF; 1170 memset(&phba->lpfc_injerr_wwpn, 1171 0, sizeof(struct lpfc_name)); 1172 } 1173 rc = BG_ERR_INIT; 1174 1175 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1176 "9079 BLKGRD: Injecting reftag error: " 1177 "read lba x%lx\n", (unsigned long)lba); 1178 break; 1179 } 1180 } 1181 } 1182 1183 /* Should we change the Application Tag */ 1184 if (apptag) { 1185 if (phba->lpfc_injerr_wapp_cnt) { 1186 switch (op) { 1187 case SCSI_PROT_WRITE_PASS: 1188 if (src) { 1189 /* 1190 * For WRITE_PASS, force the error 1191 * to be sent on the wire. It should 1192 * be detected by the Target. 1193 * If blockoff != 0 error will be 1194 * inserted in middle of the IO. 1195 */ 1196 1197 lpfc_printf_log(phba, KERN_ERR, 1198 LOG_TRACE_EVENT, 1199 "9080 BLKGRD: Injecting apptag error: " 1200 "write lba x%lx + x%x oldappTag x%x\n", 1201 (unsigned long)lba, blockoff, 1202 be16_to_cpu(src->app_tag)); 1203 1204 /* 1205 * Save the old app_tag so we can 1206 * restore it on completion. 1207 */ 1208 if (lpfc_cmd) { 1209 lpfc_cmd->prot_data_type = 1210 LPFC_INJERR_APPTAG; 1211 lpfc_cmd->prot_data_segment = 1212 src; 1213 lpfc_cmd->prot_data = 1214 src->app_tag; 1215 } 1216 src->app_tag = cpu_to_be16(0xDEAD); 1217 phba->lpfc_injerr_wapp_cnt--; 1218 if (phba->lpfc_injerr_wapp_cnt == 0) { 1219 phba->lpfc_injerr_nportid = 0; 1220 phba->lpfc_injerr_lba = 1221 LPFC_INJERR_LBA_OFF; 1222 memset(&phba->lpfc_injerr_wwpn, 1223 0, sizeof(struct lpfc_name)); 1224 } 1225 rc = BG_ERR_TGT | BG_ERR_CHECK; 1226 break; 1227 } 1228 fallthrough; 1229 case SCSI_PROT_WRITE_INSERT: 1230 /* 1231 * For WRITE_INSERT, force the 1232 * error to be sent on the wire. It should be 1233 * detected by the Target. 1234 */ 1235 /* DEAD will be the apptag on the wire */ 1236 *apptag = 0xDEAD; 1237 phba->lpfc_injerr_wapp_cnt--; 1238 if (phba->lpfc_injerr_wapp_cnt == 0) { 1239 phba->lpfc_injerr_nportid = 0; 1240 phba->lpfc_injerr_lba = 1241 LPFC_INJERR_LBA_OFF; 1242 memset(&phba->lpfc_injerr_wwpn, 1243 0, sizeof(struct lpfc_name)); 1244 } 1245 rc = BG_ERR_TGT | BG_ERR_CHECK; 1246 1247 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1248 "0813 BLKGRD: Injecting apptag error: " 1249 "write lba x%lx\n", (unsigned long)lba); 1250 break; 1251 case SCSI_PROT_WRITE_STRIP: 1252 /* 1253 * For WRITE_STRIP and WRITE_PASS, 1254 * force the error on data 1255 * being copied from SLI-Host to SLI-Port. 1256 */ 1257 *apptag = 0xDEAD; 1258 phba->lpfc_injerr_wapp_cnt--; 1259 if (phba->lpfc_injerr_wapp_cnt == 0) { 1260 phba->lpfc_injerr_nportid = 0; 1261 phba->lpfc_injerr_lba = 1262 LPFC_INJERR_LBA_OFF; 1263 memset(&phba->lpfc_injerr_wwpn, 1264 0, sizeof(struct lpfc_name)); 1265 } 1266 rc = BG_ERR_INIT; 1267 1268 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1269 "0812 BLKGRD: Injecting apptag error: " 1270 "write lba x%lx\n", (unsigned long)lba); 1271 break; 1272 } 1273 } 1274 if (phba->lpfc_injerr_rapp_cnt) { 1275 switch (op) { 1276 case SCSI_PROT_READ_INSERT: 1277 case SCSI_PROT_READ_STRIP: 1278 case SCSI_PROT_READ_PASS: 1279 /* 1280 * For READ_STRIP and READ_PASS, force the 1281 * error on data being read off the wire. It 1282 * should force an IO error to the driver. 
1283 */ 1284 *apptag = 0xDEAD; 1285 phba->lpfc_injerr_rapp_cnt--; 1286 if (phba->lpfc_injerr_rapp_cnt == 0) { 1287 phba->lpfc_injerr_nportid = 0; 1288 phba->lpfc_injerr_lba = 1289 LPFC_INJERR_LBA_OFF; 1290 memset(&phba->lpfc_injerr_wwpn, 1291 0, sizeof(struct lpfc_name)); 1292 } 1293 rc = BG_ERR_INIT; 1294 1295 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1296 "0814 BLKGRD: Injecting apptag error: " 1297 "read lba x%lx\n", (unsigned long)lba); 1298 break; 1299 } 1300 } 1301 } 1302 1303 1304 /* Should we change the Guard Tag */ 1305 if (new_guard) { 1306 if (phba->lpfc_injerr_wgrd_cnt) { 1307 switch (op) { 1308 case SCSI_PROT_WRITE_PASS: 1309 rc = BG_ERR_CHECK; 1310 fallthrough; 1311 1312 case SCSI_PROT_WRITE_INSERT: 1313 /* 1314 * For WRITE_INSERT, force the 1315 * error to be sent on the wire. It should be 1316 * detected by the Target. 1317 */ 1318 phba->lpfc_injerr_wgrd_cnt--; 1319 if (phba->lpfc_injerr_wgrd_cnt == 0) { 1320 phba->lpfc_injerr_nportid = 0; 1321 phba->lpfc_injerr_lba = 1322 LPFC_INJERR_LBA_OFF; 1323 memset(&phba->lpfc_injerr_wwpn, 1324 0, sizeof(struct lpfc_name)); 1325 } 1326 1327 rc |= BG_ERR_TGT | BG_ERR_SWAP; 1328 /* Signals the caller to swap CRC->CSUM */ 1329 1330 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1331 "0817 BLKGRD: Injecting guard error: " 1332 "write lba x%lx\n", (unsigned long)lba); 1333 break; 1334 case SCSI_PROT_WRITE_STRIP: 1335 /* 1336 * For WRITE_STRIP and WRITE_PASS, 1337 * force the error on data 1338 * being copied from SLI-Host to SLI-Port. 1339 */ 1340 phba->lpfc_injerr_wgrd_cnt--; 1341 if (phba->lpfc_injerr_wgrd_cnt == 0) { 1342 phba->lpfc_injerr_nportid = 0; 1343 phba->lpfc_injerr_lba = 1344 LPFC_INJERR_LBA_OFF; 1345 memset(&phba->lpfc_injerr_wwpn, 1346 0, sizeof(struct lpfc_name)); 1347 } 1348 1349 rc = BG_ERR_INIT | BG_ERR_SWAP; 1350 /* Signals the caller to swap CRC->CSUM */ 1351 1352 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1353 "0816 BLKGRD: Injecting guard error: " 1354 "write lba x%lx\n", (unsigned long)lba); 1355 break; 1356 } 1357 } 1358 if (phba->lpfc_injerr_rgrd_cnt) { 1359 switch (op) { 1360 case SCSI_PROT_READ_INSERT: 1361 case SCSI_PROT_READ_STRIP: 1362 case SCSI_PROT_READ_PASS: 1363 /* 1364 * For READ_STRIP and READ_PASS, force the 1365 * error on data being read off the wire. It 1366 * should force an IO error to the driver. 1367 */ 1368 phba->lpfc_injerr_rgrd_cnt--; 1369 if (phba->lpfc_injerr_rgrd_cnt == 0) { 1370 phba->lpfc_injerr_nportid = 0; 1371 phba->lpfc_injerr_lba = 1372 LPFC_INJERR_LBA_OFF; 1373 memset(&phba->lpfc_injerr_wwpn, 1374 0, sizeof(struct lpfc_name)); 1375 } 1376 1377 rc = BG_ERR_INIT | BG_ERR_SWAP; 1378 /* Signals the caller to swap CRC->CSUM */ 1379 1380 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1381 "0818 BLKGRD: Injecting guard error: " 1382 "read lba x%lx\n", (unsigned long)lba); 1383 } 1384 } 1385 } 1386 1387 return rc; 1388 } 1389 #endif 1390 1391 /** 1392 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with 1393 * the specified SCSI command. 1394 * @phba: The Hba for which this call is being executed. 
1395 * @sc: The SCSI command to examine 1396 * @txop: (out) BlockGuard operation for transmitted data 1397 * @rxop: (out) BlockGuard operation for received data 1398 * 1399 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined 1400 * 1401 **/ 1402 static int 1403 lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1404 uint8_t *txop, uint8_t *rxop) 1405 { 1406 uint8_t ret = 0; 1407 1408 if (lpfc_cmd_guard_csum(sc)) { 1409 switch (scsi_get_prot_op(sc)) { 1410 case SCSI_PROT_READ_INSERT: 1411 case SCSI_PROT_WRITE_STRIP: 1412 *rxop = BG_OP_IN_NODIF_OUT_CSUM; 1413 *txop = BG_OP_IN_CSUM_OUT_NODIF; 1414 break; 1415 1416 case SCSI_PROT_READ_STRIP: 1417 case SCSI_PROT_WRITE_INSERT: 1418 *rxop = BG_OP_IN_CRC_OUT_NODIF; 1419 *txop = BG_OP_IN_NODIF_OUT_CRC; 1420 break; 1421 1422 case SCSI_PROT_READ_PASS: 1423 case SCSI_PROT_WRITE_PASS: 1424 *rxop = BG_OP_IN_CRC_OUT_CSUM; 1425 *txop = BG_OP_IN_CSUM_OUT_CRC; 1426 break; 1427 1428 case SCSI_PROT_NORMAL: 1429 default: 1430 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1431 "9063 BLKGRD: Bad op/guard:%d/IP combination\n", 1432 scsi_get_prot_op(sc)); 1433 ret = 1; 1434 break; 1435 1436 } 1437 } else { 1438 switch (scsi_get_prot_op(sc)) { 1439 case SCSI_PROT_READ_STRIP: 1440 case SCSI_PROT_WRITE_INSERT: 1441 *rxop = BG_OP_IN_CRC_OUT_NODIF; 1442 *txop = BG_OP_IN_NODIF_OUT_CRC; 1443 break; 1444 1445 case SCSI_PROT_READ_PASS: 1446 case SCSI_PROT_WRITE_PASS: 1447 *rxop = BG_OP_IN_CRC_OUT_CRC; 1448 *txop = BG_OP_IN_CRC_OUT_CRC; 1449 break; 1450 1451 case SCSI_PROT_READ_INSERT: 1452 case SCSI_PROT_WRITE_STRIP: 1453 *rxop = BG_OP_IN_NODIF_OUT_CRC; 1454 *txop = BG_OP_IN_CRC_OUT_NODIF; 1455 break; 1456 1457 case SCSI_PROT_NORMAL: 1458 default: 1459 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1460 "9075 BLKGRD: Bad op/guard:%d/CRC combination\n", 1461 scsi_get_prot_op(sc)); 1462 ret = 1; 1463 break; 1464 } 1465 } 1466 1467 return ret; 1468 } 1469 1470 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1471 /** 1472 * lpfc_bg_err_opcodes - reDetermine the BlockGuard opcodes to be used with 1473 * the specified SCSI command in order to force a guard tag error. 1474 * @phba: The Hba for which this call is being executed. 
1475 * @sc: The SCSI command to examine 1476 * @txop: (out) BlockGuard operation for transmitted data 1477 * @rxop: (out) BlockGuard operation for received data 1478 * 1479 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined 1480 * 1481 **/ 1482 static int 1483 lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1484 uint8_t *txop, uint8_t *rxop) 1485 { 1486 uint8_t ret = 0; 1487 1488 if (lpfc_cmd_guard_csum(sc)) { 1489 switch (scsi_get_prot_op(sc)) { 1490 case SCSI_PROT_READ_INSERT: 1491 case SCSI_PROT_WRITE_STRIP: 1492 *rxop = BG_OP_IN_NODIF_OUT_CRC; 1493 *txop = BG_OP_IN_CRC_OUT_NODIF; 1494 break; 1495 1496 case SCSI_PROT_READ_STRIP: 1497 case SCSI_PROT_WRITE_INSERT: 1498 *rxop = BG_OP_IN_CSUM_OUT_NODIF; 1499 *txop = BG_OP_IN_NODIF_OUT_CSUM; 1500 break; 1501 1502 case SCSI_PROT_READ_PASS: 1503 case SCSI_PROT_WRITE_PASS: 1504 *rxop = BG_OP_IN_CSUM_OUT_CRC; 1505 *txop = BG_OP_IN_CRC_OUT_CSUM; 1506 break; 1507 1508 case SCSI_PROT_NORMAL: 1509 default: 1510 break; 1511 1512 } 1513 } else { 1514 switch (scsi_get_prot_op(sc)) { 1515 case SCSI_PROT_READ_STRIP: 1516 case SCSI_PROT_WRITE_INSERT: 1517 *rxop = BG_OP_IN_CSUM_OUT_NODIF; 1518 *txop = BG_OP_IN_NODIF_OUT_CSUM; 1519 break; 1520 1521 case SCSI_PROT_READ_PASS: 1522 case SCSI_PROT_WRITE_PASS: 1523 *rxop = BG_OP_IN_CSUM_OUT_CSUM; 1524 *txop = BG_OP_IN_CSUM_OUT_CSUM; 1525 break; 1526 1527 case SCSI_PROT_READ_INSERT: 1528 case SCSI_PROT_WRITE_STRIP: 1529 *rxop = BG_OP_IN_NODIF_OUT_CSUM; 1530 *txop = BG_OP_IN_CSUM_OUT_NODIF; 1531 break; 1532 1533 case SCSI_PROT_NORMAL: 1534 default: 1535 break; 1536 } 1537 } 1538 1539 return ret; 1540 } 1541 #endif 1542 1543 /** 1544 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data 1545 * @phba: The Hba for which this call is being executed. 1546 * @sc: pointer to scsi command we're working on 1547 * @bpl: pointer to buffer list for protection groups 1548 * @datasegcnt: number of segments of data that have been dma mapped 1549 * 1550 * This function sets up BPL buffer list for protection groups of 1551 * type LPFC_PG_TYPE_NO_DIF 1552 * 1553 * This is usually used when the HBA is instructed to generate 1554 * DIFs and insert them into data stream (or strip DIF from 1555 * incoming data stream) 1556 * 1557 * The buffer list consists of just one protection group described 1558 * below: 1559 * +-------------------------+ 1560 * start of prot group --> | PDE_5 | 1561 * +-------------------------+ 1562 * | PDE_6 | 1563 * +-------------------------+ 1564 * | Data BDE | 1565 * +-------------------------+ 1566 * |more Data BDE's ... (opt)| 1567 * +-------------------------+ 1568 * 1569 * 1570 * Note: Data s/g buffers have been dma mapped 1571 * 1572 * Returns the number of BDEs added to the BPL. 
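 *
 * For reference, each DIF tuple is 8 bytes (guard, app and ref tag, per
 * struct scsi_dif_tuple above), so a 4 KB I/O on a 512-byte-block device
 * carries 4096 / 512 = 8 tuples, i.e. 64 bytes of protection data that the
 * HBA inserts or strips for this NO_DIF protection-group type.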
1573 **/ 1574 static int 1575 lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1576 struct ulp_bde64 *bpl, int datasegcnt) 1577 { 1578 struct scatterlist *sgde = NULL; /* s/g data entry */ 1579 struct lpfc_pde5 *pde5 = NULL; 1580 struct lpfc_pde6 *pde6 = NULL; 1581 dma_addr_t physaddr; 1582 int i = 0, num_bde = 0, status; 1583 int datadir = sc->sc_data_direction; 1584 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1585 uint32_t rc; 1586 #endif 1587 uint32_t checking = 1; 1588 uint32_t reftag; 1589 uint8_t txop, rxop; 1590 1591 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); 1592 if (status) 1593 goto out; 1594 1595 /* extract some info from the scsi command for pde*/ 1596 reftag = t10_pi_ref_tag(sc->request); 1597 if (reftag == LPFC_INVALID_REFTAG) 1598 goto out; 1599 1600 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1601 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); 1602 if (rc) { 1603 if (rc & BG_ERR_SWAP) 1604 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); 1605 if (rc & BG_ERR_CHECK) 1606 checking = 0; 1607 } 1608 #endif 1609 1610 /* setup PDE5 with what we have */ 1611 pde5 = (struct lpfc_pde5 *) bpl; 1612 memset(pde5, 0, sizeof(struct lpfc_pde5)); 1613 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); 1614 1615 /* Endianness conversion if necessary for PDE5 */ 1616 pde5->word0 = cpu_to_le32(pde5->word0); 1617 pde5->reftag = cpu_to_le32(reftag); 1618 1619 /* advance bpl and increment bde count */ 1620 num_bde++; 1621 bpl++; 1622 pde6 = (struct lpfc_pde6 *) bpl; 1623 1624 /* setup PDE6 with the rest of the info */ 1625 memset(pde6, 0, sizeof(struct lpfc_pde6)); 1626 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); 1627 bf_set(pde6_optx, pde6, txop); 1628 bf_set(pde6_oprx, pde6, rxop); 1629 1630 /* 1631 * We only need to check the data on READs, for WRITEs 1632 * protection data is automatically generated, not checked. 1633 */ 1634 if (datadir == DMA_FROM_DEVICE) { 1635 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) 1636 bf_set(pde6_ce, pde6, checking); 1637 else 1638 bf_set(pde6_ce, pde6, 0); 1639 1640 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF)) 1641 bf_set(pde6_re, pde6, checking); 1642 else 1643 bf_set(pde6_re, pde6, 0); 1644 } 1645 bf_set(pde6_ai, pde6, 1); 1646 bf_set(pde6_ae, pde6, 0); 1647 bf_set(pde6_apptagval, pde6, 0); 1648 1649 /* Endianness conversion if necessary for PDE6 */ 1650 pde6->word0 = cpu_to_le32(pde6->word0); 1651 pde6->word1 = cpu_to_le32(pde6->word1); 1652 pde6->word2 = cpu_to_le32(pde6->word2); 1653 1654 /* advance bpl and increment bde count */ 1655 num_bde++; 1656 bpl++; 1657 1658 /* assumption: caller has already run dma_map_sg on command data */ 1659 scsi_for_each_sg(sc, sgde, datasegcnt, i) { 1660 physaddr = sg_dma_address(sgde); 1661 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr)); 1662 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); 1663 bpl->tus.f.bdeSize = sg_dma_len(sgde); 1664 if (datadir == DMA_TO_DEVICE) 1665 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1666 else 1667 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; 1668 bpl->tus.w = le32_to_cpu(bpl->tus.w); 1669 bpl++; 1670 num_bde++; 1671 } 1672 1673 out: 1674 return num_bde; 1675 } 1676 1677 /** 1678 * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data 1679 * @phba: The Hba for which this call is being executed. 
1680 * @sc: pointer to scsi command we're working on 1681 * @bpl: pointer to buffer list for protection groups 1682 * @datacnt: number of segments of data that have been dma mapped 1683 * @protcnt: number of segment of protection data that have been dma mapped 1684 * 1685 * This function sets up BPL buffer list for protection groups of 1686 * type LPFC_PG_TYPE_DIF 1687 * 1688 * This is usually used when DIFs are in their own buffers, 1689 * separate from the data. The HBA can then by instructed 1690 * to place the DIFs in the outgoing stream. For read operations, 1691 * The HBA could extract the DIFs and place it in DIF buffers. 1692 * 1693 * The buffer list for this type consists of one or more of the 1694 * protection groups described below: 1695 * +-------------------------+ 1696 * start of first prot group --> | PDE_5 | 1697 * +-------------------------+ 1698 * | PDE_6 | 1699 * +-------------------------+ 1700 * | PDE_7 (Prot BDE) | 1701 * +-------------------------+ 1702 * | Data BDE | 1703 * +-------------------------+ 1704 * |more Data BDE's ... (opt)| 1705 * +-------------------------+ 1706 * start of new prot group --> | PDE_5 | 1707 * +-------------------------+ 1708 * | ... | 1709 * +-------------------------+ 1710 * 1711 * Note: It is assumed that both data and protection s/g buffers have been 1712 * mapped for DMA 1713 * 1714 * Returns the number of BDEs added to the BPL. 1715 **/ 1716 static int 1717 lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1718 struct ulp_bde64 *bpl, int datacnt, int protcnt) 1719 { 1720 struct scatterlist *sgde = NULL; /* s/g data entry */ 1721 struct scatterlist *sgpe = NULL; /* s/g prot entry */ 1722 struct lpfc_pde5 *pde5 = NULL; 1723 struct lpfc_pde6 *pde6 = NULL; 1724 struct lpfc_pde7 *pde7 = NULL; 1725 dma_addr_t dataphysaddr, protphysaddr; 1726 unsigned short curr_data = 0, curr_prot = 0; 1727 unsigned int split_offset; 1728 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder; 1729 unsigned int protgrp_blks, protgrp_bytes; 1730 unsigned int remainder, subtotal; 1731 int status; 1732 int datadir = sc->sc_data_direction; 1733 unsigned char pgdone = 0, alldone = 0; 1734 unsigned blksize; 1735 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1736 uint32_t rc; 1737 #endif 1738 uint32_t checking = 1; 1739 uint32_t reftag; 1740 uint8_t txop, rxop; 1741 int num_bde = 0; 1742 1743 sgpe = scsi_prot_sglist(sc); 1744 sgde = scsi_sglist(sc); 1745 1746 if (!sgpe || !sgde) { 1747 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1748 "9020 Invalid s/g entry: data=x%px prot=x%px\n", 1749 sgpe, sgde); 1750 return 0; 1751 } 1752 1753 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); 1754 if (status) 1755 goto out; 1756 1757 /* extract some info from the scsi command */ 1758 blksize = lpfc_cmd_blksize(sc); 1759 reftag = t10_pi_ref_tag(sc->request); 1760 if (reftag == LPFC_INVALID_REFTAG) 1761 goto out; 1762 1763 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1764 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); 1765 if (rc) { 1766 if (rc & BG_ERR_SWAP) 1767 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); 1768 if (rc & BG_ERR_CHECK) 1769 checking = 0; 1770 } 1771 #endif 1772 1773 split_offset = 0; 1774 do { 1775 /* Check to see if we ran out of space */ 1776 if (num_bde >= (phba->cfg_total_seg_cnt - 2)) 1777 return num_bde + 3; 1778 1779 /* setup PDE5 with what we have */ 1780 pde5 = (struct lpfc_pde5 *) bpl; 1781 memset(pde5, 0, sizeof(struct lpfc_pde5)); 1782 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); 1783 1784 /* Endianness conversion if 
necessary for PDE5 */ 1785 pde5->word0 = cpu_to_le32(pde5->word0); 1786 pde5->reftag = cpu_to_le32(reftag); 1787 1788 /* advance bpl and increment bde count */ 1789 num_bde++; 1790 bpl++; 1791 pde6 = (struct lpfc_pde6 *) bpl; 1792 1793 /* setup PDE6 with the rest of the info */ 1794 memset(pde6, 0, sizeof(struct lpfc_pde6)); 1795 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); 1796 bf_set(pde6_optx, pde6, txop); 1797 bf_set(pde6_oprx, pde6, rxop); 1798 1799 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) 1800 bf_set(pde6_ce, pde6, checking); 1801 else 1802 bf_set(pde6_ce, pde6, 0); 1803 1804 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF)) 1805 bf_set(pde6_re, pde6, checking); 1806 else 1807 bf_set(pde6_re, pde6, 0); 1808 1809 bf_set(pde6_ai, pde6, 1); 1810 bf_set(pde6_ae, pde6, 0); 1811 bf_set(pde6_apptagval, pde6, 0); 1812 1813 /* Endianness conversion if necessary for PDE6 */ 1814 pde6->word0 = cpu_to_le32(pde6->word0); 1815 pde6->word1 = cpu_to_le32(pde6->word1); 1816 pde6->word2 = cpu_to_le32(pde6->word2); 1817 1818 /* advance bpl and increment bde count */ 1819 num_bde++; 1820 bpl++; 1821 1822 /* setup the first BDE that points to protection buffer */ 1823 protphysaddr = sg_dma_address(sgpe) + protgroup_offset; 1824 protgroup_len = sg_dma_len(sgpe) - protgroup_offset; 1825 1826 /* must be integer multiple of the DIF block length */ 1827 BUG_ON(protgroup_len % 8); 1828 1829 pde7 = (struct lpfc_pde7 *) bpl; 1830 memset(pde7, 0, sizeof(struct lpfc_pde7)); 1831 bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR); 1832 1833 pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr)); 1834 pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr)); 1835 1836 protgrp_blks = protgroup_len / 8; 1837 protgrp_bytes = protgrp_blks * blksize; 1838 1839 /* check if this pde is crossing the 4K boundary; if so split */ 1840 if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) { 1841 protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff); 1842 protgroup_offset += protgroup_remainder; 1843 protgrp_blks = protgroup_remainder / 8; 1844 protgrp_bytes = protgrp_blks * blksize; 1845 } else { 1846 protgroup_offset = 0; 1847 curr_prot++; 1848 } 1849 1850 num_bde++; 1851 1852 /* setup BDE's for data blocks associated with DIF data */ 1853 pgdone = 0; 1854 subtotal = 0; /* total bytes processed for current prot grp */ 1855 while (!pgdone) { 1856 /* Check to see if we ran out of space */ 1857 if (num_bde >= phba->cfg_total_seg_cnt) 1858 return num_bde + 1; 1859 1860 if (!sgde) { 1861 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1862 "9065 BLKGRD:%s Invalid data segment\n", 1863 __func__); 1864 return 0; 1865 } 1866 bpl++; 1867 dataphysaddr = sg_dma_address(sgde) + split_offset; 1868 bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr)); 1869 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr)); 1870 1871 remainder = sg_dma_len(sgde) - split_offset; 1872 1873 if ((subtotal + remainder) <= protgrp_bytes) { 1874 /* we can use this whole buffer */ 1875 bpl->tus.f.bdeSize = remainder; 1876 split_offset = 0; 1877 1878 if ((subtotal + remainder) == protgrp_bytes) 1879 pgdone = 1; 1880 } else { 1881 /* must split this buffer with next prot grp */ 1882 bpl->tus.f.bdeSize = protgrp_bytes - subtotal; 1883 split_offset += bpl->tus.f.bdeSize; 1884 } 1885 1886 subtotal += bpl->tus.f.bdeSize; 1887 1888 if (datadir == DMA_TO_DEVICE) 1889 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1890 else 1891 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; 1892 bpl->tus.w = le32_to_cpu(bpl->tus.w); 1893 1894 num_bde++; 1895 curr_data++; 1896 1897 if 
(split_offset) 1898 break; 1899 1900 /* Move to the next s/g segment if possible */ 1901 sgde = sg_next(sgde); 1902 1903 } 1904 1905 if (protgroup_offset) { 1906 /* update the reference tag */ 1907 reftag += protgrp_blks; 1908 bpl++; 1909 continue; 1910 } 1911 1912 /* are we done ? */ 1913 if (curr_prot == protcnt) { 1914 alldone = 1; 1915 } else if (curr_prot < protcnt) { 1916 /* advance to next prot buffer */ 1917 sgpe = sg_next(sgpe); 1918 bpl++; 1919 1920 /* update the reference tag */ 1921 reftag += protgrp_blks; 1922 } else { 1923 /* if we're here, we have a bug */ 1924 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 1925 "9054 BLKGRD: bug in %s\n", __func__); 1926 } 1927 1928 } while (!alldone); 1929 out: 1930 1931 return num_bde; 1932 } 1933 1934 /** 1935 * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data 1936 * @phba: The Hba for which this call is being executed. 1937 * @sc: pointer to scsi command we're working on 1938 * @sgl: pointer to buffer list for protection groups 1939 * @datasegcnt: number of segments of data that have been dma mapped 1940 * @lpfc_cmd: lpfc scsi command object pointer. 1941 * 1942 * This function sets up SGL buffer list for protection groups of 1943 * type LPFC_PG_TYPE_NO_DIF 1944 * 1945 * This is usually used when the HBA is instructed to generate 1946 * DIFs and insert them into data stream (or strip DIF from 1947 * incoming data stream) 1948 * 1949 * The buffer list consists of just one protection group described 1950 * below: 1951 * +-------------------------+ 1952 * start of prot group --> | DI_SEED | 1953 * +-------------------------+ 1954 * | Data SGE | 1955 * +-------------------------+ 1956 * |more Data SGE's ... (opt)| 1957 * +-------------------------+ 1958 * 1959 * 1960 * Note: Data s/g buffers have been dma mapped 1961 * 1962 * Returns the number of SGEs added to the SGL. 1963 **/ 1964 static int 1965 lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1966 struct sli4_sge *sgl, int datasegcnt, 1967 struct lpfc_io_buf *lpfc_cmd) 1968 { 1969 struct scatterlist *sgde = NULL; /* s/g data entry */ 1970 struct sli4_sge_diseed *diseed = NULL; 1971 dma_addr_t physaddr; 1972 int i = 0, num_sge = 0, status; 1973 uint32_t reftag; 1974 uint8_t txop, rxop; 1975 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1976 uint32_t rc; 1977 #endif 1978 uint32_t checking = 1; 1979 uint32_t dma_len; 1980 uint32_t dma_offset = 0; 1981 struct sli4_hybrid_sgl *sgl_xtra = NULL; 1982 int j; 1983 bool lsp_just_set = false; 1984 1985 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); 1986 if (status) 1987 goto out; 1988 1989 /* extract some info from the scsi command for pde*/ 1990 reftag = t10_pi_ref_tag(sc->request); 1991 if (reftag == LPFC_INVALID_REFTAG) 1992 goto out; 1993 1994 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1995 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); 1996 if (rc) { 1997 if (rc & BG_ERR_SWAP) 1998 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); 1999 if (rc & BG_ERR_CHECK) 2000 checking = 0; 2001 } 2002 #endif 2003 2004 /* setup DISEED with what we have */ 2005 diseed = (struct sli4_sge_diseed *) sgl; 2006 memset(diseed, 0, sizeof(struct sli4_sge_diseed)); 2007 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED); 2008 2009 /* Endianness conversion if necessary */ 2010 diseed->ref_tag = cpu_to_le32(reftag); 2011 diseed->ref_tag_tran = diseed->ref_tag; 2012 2013 /* 2014 * We only need to check the data on READs, for WRITEs 2015 * protection data is automatically generated, not checked. 
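 * In practice this means the DISEED guard/ref checking bits are only set
 * when sc_data_direction is DMA_FROM_DEVICE; for writes they are left
 * clear below.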
2016 */ 2017 if (sc->sc_data_direction == DMA_FROM_DEVICE) { 2018 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) 2019 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); 2020 else 2021 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); 2022 2023 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF)) 2024 bf_set(lpfc_sli4_sge_dif_re, diseed, checking); 2025 else 2026 bf_set(lpfc_sli4_sge_dif_re, diseed, 0); 2027 } 2028 2029 /* setup DISEED with the rest of the info */ 2030 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); 2031 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); 2032 2033 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); 2034 bf_set(lpfc_sli4_sge_dif_me, diseed, 0); 2035 2036 /* Endianness conversion if necessary for DISEED */ 2037 diseed->word2 = cpu_to_le32(diseed->word2); 2038 diseed->word3 = cpu_to_le32(diseed->word3); 2039 2040 /* advance bpl and increment sge count */ 2041 num_sge++; 2042 sgl++; 2043 2044 /* assumption: caller has already run dma_map_sg on command data */ 2045 sgde = scsi_sglist(sc); 2046 j = 3; 2047 for (i = 0; i < datasegcnt; i++) { 2048 /* clear it */ 2049 sgl->word2 = 0; 2050 2051 /* do we need to expand the segment */ 2052 if (!lsp_just_set && !((j + 1) % phba->border_sge_num) && 2053 ((datasegcnt - 1) != i)) { 2054 /* set LSP type */ 2055 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP); 2056 2057 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd); 2058 2059 if (unlikely(!sgl_xtra)) { 2060 lpfc_cmd->seg_cnt = 0; 2061 return 0; 2062 } 2063 sgl->addr_lo = cpu_to_le32(putPaddrLow( 2064 sgl_xtra->dma_phys_sgl)); 2065 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 2066 sgl_xtra->dma_phys_sgl)); 2067 2068 } else { 2069 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); 2070 } 2071 2072 if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) { 2073 if ((datasegcnt - 1) == i) 2074 bf_set(lpfc_sli4_sge_last, sgl, 1); 2075 physaddr = sg_dma_address(sgde); 2076 dma_len = sg_dma_len(sgde); 2077 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); 2078 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); 2079 2080 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 2081 sgl->word2 = cpu_to_le32(sgl->word2); 2082 sgl->sge_len = cpu_to_le32(dma_len); 2083 2084 dma_offset += dma_len; 2085 sgde = sg_next(sgde); 2086 2087 sgl++; 2088 num_sge++; 2089 lsp_just_set = false; 2090 2091 } else { 2092 sgl->word2 = cpu_to_le32(sgl->word2); 2093 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size); 2094 2095 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 2096 i = i - 1; 2097 2098 lsp_just_set = true; 2099 } 2100 2101 j++; 2102 2103 } 2104 2105 out: 2106 return num_sge; 2107 } 2108 2109 /** 2110 * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data 2111 * @phba: The Hba for which this call is being executed. 2112 * @sc: pointer to scsi command we're working on 2113 * @sgl: pointer to buffer list for protection groups 2114 * @datacnt: number of segments of data that have been dma mapped 2115 * @protcnt: number of segment of protection data that have been dma mapped 2116 * @lpfc_cmd: lpfc scsi command object pointer. 2117 * 2118 * This function sets up SGL buffer list for protection groups of 2119 * type LPFC_PG_TYPE_DIF 2120 * 2121 * This is usually used when DIFs are in their own buffers, 2122 * separate from the data. The HBA can then by instructed 2123 * to place the DIFs in the outgoing stream. For read operations, 2124 * The HBA could extract the DIFs and place it in DIF buffers. 
2125 * 2126 * The buffer list for this type consists of one or more of the 2127 * protection groups described below: 2128 * +-------------------------+ 2129 * start of first prot group --> | DISEED | 2130 * +-------------------------+ 2131 * | DIF (Prot SGE) | 2132 * +-------------------------+ 2133 * | Data SGE | 2134 * +-------------------------+ 2135 * |more Data SGE's ... (opt)| 2136 * +-------------------------+ 2137 * start of new prot group --> | DISEED | 2138 * +-------------------------+ 2139 * | ... | 2140 * +-------------------------+ 2141 * 2142 * Note: It is assumed that both data and protection s/g buffers have been 2143 * mapped for DMA 2144 * 2145 * Returns the number of SGEs added to the SGL. 2146 **/ 2147 static int 2148 lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, 2149 struct sli4_sge *sgl, int datacnt, int protcnt, 2150 struct lpfc_io_buf *lpfc_cmd) 2151 { 2152 struct scatterlist *sgde = NULL; /* s/g data entry */ 2153 struct scatterlist *sgpe = NULL; /* s/g prot entry */ 2154 struct sli4_sge_diseed *diseed = NULL; 2155 dma_addr_t dataphysaddr, protphysaddr; 2156 unsigned short curr_data = 0, curr_prot = 0; 2157 unsigned int split_offset; 2158 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder; 2159 unsigned int protgrp_blks, protgrp_bytes; 2160 unsigned int remainder, subtotal; 2161 int status; 2162 unsigned char pgdone = 0, alldone = 0; 2163 unsigned blksize; 2164 uint32_t reftag; 2165 uint8_t txop, rxop; 2166 uint32_t dma_len; 2167 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 2168 uint32_t rc; 2169 #endif 2170 uint32_t checking = 1; 2171 uint32_t dma_offset = 0; 2172 int num_sge = 0, j = 2; 2173 struct sli4_hybrid_sgl *sgl_xtra = NULL; 2174 2175 sgpe = scsi_prot_sglist(sc); 2176 sgde = scsi_sglist(sc); 2177 2178 if (!sgpe || !sgde) { 2179 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2180 "9082 Invalid s/g entry: data=x%px prot=x%px\n", 2181 sgpe, sgde); 2182 return 0; 2183 } 2184 2185 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); 2186 if (status) 2187 goto out; 2188 2189 /* extract some info from the scsi command */ 2190 blksize = lpfc_cmd_blksize(sc); 2191 reftag = t10_pi_ref_tag(sc->request); 2192 if (reftag == LPFC_INVALID_REFTAG) 2193 goto out; 2194 2195 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 2196 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1); 2197 if (rc) { 2198 if (rc & BG_ERR_SWAP) 2199 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); 2200 if (rc & BG_ERR_CHECK) 2201 checking = 0; 2202 } 2203 #endif 2204 2205 split_offset = 0; 2206 do { 2207 /* Check to see if we ran out of space */ 2208 if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) && 2209 !(phba->cfg_xpsgl)) 2210 return num_sge + 3; 2211 2212 /* DISEED and DIF have to be together */ 2213 if (!((j + 1) % phba->border_sge_num) || 2214 !((j + 2) % phba->border_sge_num) || 2215 !((j + 3) % phba->border_sge_num)) { 2216 sgl->word2 = 0; 2217 2218 /* set LSP type */ 2219 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP); 2220 2221 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd); 2222 2223 if (unlikely(!sgl_xtra)) { 2224 goto out; 2225 } else { 2226 sgl->addr_lo = cpu_to_le32(putPaddrLow( 2227 sgl_xtra->dma_phys_sgl)); 2228 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 2229 sgl_xtra->dma_phys_sgl)); 2230 } 2231 2232 sgl->word2 = cpu_to_le32(sgl->word2); 2233 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size); 2234 2235 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 2236 j = 0; 2237 } 2238 2239 /* setup DISEED with what we have */ 2240 diseed = (struct sli4_sge_diseed *) sgl; 
2241 memset(diseed, 0, sizeof(struct sli4_sge_diseed)); 2242 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED); 2243 2244 /* Endianness conversion if necessary */ 2245 diseed->ref_tag = cpu_to_le32(reftag); 2246 diseed->ref_tag_tran = diseed->ref_tag; 2247 2248 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) { 2249 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); 2250 2251 } else { 2252 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); 2253 /* 2254 * When in this mode, the hardware will replace 2255 * the guard tag from the host with a 2256 * newly generated good CRC for the wire. 2257 * Switch to raw mode here to avoid this 2258 * behavior. What the host sends gets put on the wire. 2259 */ 2260 if (txop == BG_OP_IN_CRC_OUT_CRC) { 2261 txop = BG_OP_RAW_MODE; 2262 rxop = BG_OP_RAW_MODE; 2263 } 2264 } 2265 2266 2267 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF)) 2268 bf_set(lpfc_sli4_sge_dif_re, diseed, checking); 2269 else 2270 bf_set(lpfc_sli4_sge_dif_re, diseed, 0); 2271 2272 /* setup DISEED with the rest of the info */ 2273 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); 2274 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); 2275 2276 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); 2277 bf_set(lpfc_sli4_sge_dif_me, diseed, 0); 2278 2279 /* Endianness conversion if necessary for DISEED */ 2280 diseed->word2 = cpu_to_le32(diseed->word2); 2281 diseed->word3 = cpu_to_le32(diseed->word3); 2282 2283 /* advance sgl and increment bde count */ 2284 num_sge++; 2285 2286 sgl++; 2287 j++; 2288 2289 /* setup the first BDE that points to protection buffer */ 2290 protphysaddr = sg_dma_address(sgpe) + protgroup_offset; 2291 protgroup_len = sg_dma_len(sgpe) - protgroup_offset; 2292 2293 /* must be integer multiple of the DIF block length */ 2294 BUG_ON(protgroup_len % 8); 2295 2296 /* Now setup DIF SGE */ 2297 sgl->word2 = 0; 2298 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF); 2299 sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr)); 2300 sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr)); 2301 sgl->word2 = cpu_to_le32(sgl->word2); 2302 sgl->sge_len = 0; 2303 2304 protgrp_blks = protgroup_len / 8; 2305 protgrp_bytes = protgrp_blks * blksize; 2306 2307 /* check if DIF SGE is crossing the 4K boundary; if so split */ 2308 if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) { 2309 protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff); 2310 protgroup_offset += protgroup_remainder; 2311 protgrp_blks = protgroup_remainder / 8; 2312 protgrp_bytes = protgrp_blks * blksize; 2313 } else { 2314 protgroup_offset = 0; 2315 curr_prot++; 2316 } 2317 2318 num_sge++; 2319 2320 /* setup SGE's for data blocks associated with DIF data */ 2321 pgdone = 0; 2322 subtotal = 0; /* total bytes processed for current prot grp */ 2323 2324 sgl++; 2325 j++; 2326 2327 while (!pgdone) { 2328 /* Check to see if we ran out of space */ 2329 if ((num_sge >= phba->cfg_total_seg_cnt) && 2330 !phba->cfg_xpsgl) 2331 return num_sge + 1; 2332 2333 if (!sgde) { 2334 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2335 "9086 BLKGRD:%s Invalid data segment\n", 2336 __func__); 2337 return 0; 2338 } 2339 2340 if (!((j + 1) % phba->border_sge_num)) { 2341 sgl->word2 = 0; 2342 2343 /* set LSP type */ 2344 bf_set(lpfc_sli4_sge_type, sgl, 2345 LPFC_SGE_TYPE_LSP); 2346 2347 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, 2348 lpfc_cmd); 2349 2350 if (unlikely(!sgl_xtra)) { 2351 goto out; 2352 } else { 2353 sgl->addr_lo = cpu_to_le32( 2354 putPaddrLow(sgl_xtra->dma_phys_sgl)); 2355 sgl->addr_hi = cpu_to_le32( 2356 putPaddrHigh(sgl_xtra->dma_phys_sgl)); 2357 } 2358 
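/* finish the LSP entry and continue building SGEs in the newly
 * chained SGL page
 */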
2359 sgl->word2 = cpu_to_le32(sgl->word2); 2360 sgl->sge_len = cpu_to_le32( 2361 phba->cfg_sg_dma_buf_size); 2362 2363 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 2364 } else { 2365 dataphysaddr = sg_dma_address(sgde) + 2366 split_offset; 2367 2368 remainder = sg_dma_len(sgde) - split_offset; 2369 2370 if ((subtotal + remainder) <= protgrp_bytes) { 2371 /* we can use this whole buffer */ 2372 dma_len = remainder; 2373 split_offset = 0; 2374 2375 if ((subtotal + remainder) == 2376 protgrp_bytes) 2377 pgdone = 1; 2378 } else { 2379 /* must split this buffer with next 2380 * prot grp 2381 */ 2382 dma_len = protgrp_bytes - subtotal; 2383 split_offset += dma_len; 2384 } 2385 2386 subtotal += dma_len; 2387 2388 sgl->word2 = 0; 2389 sgl->addr_lo = cpu_to_le32(putPaddrLow( 2390 dataphysaddr)); 2391 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 2392 dataphysaddr)); 2393 bf_set(lpfc_sli4_sge_last, sgl, 0); 2394 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 2395 bf_set(lpfc_sli4_sge_type, sgl, 2396 LPFC_SGE_TYPE_DATA); 2397 2398 sgl->sge_len = cpu_to_le32(dma_len); 2399 dma_offset += dma_len; 2400 2401 num_sge++; 2402 curr_data++; 2403 2404 if (split_offset) { 2405 sgl++; 2406 j++; 2407 break; 2408 } 2409 2410 /* Move to the next s/g segment if possible */ 2411 sgde = sg_next(sgde); 2412 2413 sgl++; 2414 } 2415 2416 j++; 2417 } 2418 2419 if (protgroup_offset) { 2420 /* update the reference tag */ 2421 reftag += protgrp_blks; 2422 continue; 2423 } 2424 2425 /* are we done ? */ 2426 if (curr_prot == protcnt) { 2427 /* mark the last SGL */ 2428 sgl--; 2429 bf_set(lpfc_sli4_sge_last, sgl, 1); 2430 alldone = 1; 2431 } else if (curr_prot < protcnt) { 2432 /* advance to next prot buffer */ 2433 sgpe = sg_next(sgpe); 2434 2435 /* update the reference tag */ 2436 reftag += protgrp_blks; 2437 } else { 2438 /* if we're here, we have a bug */ 2439 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2440 "9085 BLKGRD: bug in %s\n", __func__); 2441 } 2442 2443 } while (!alldone); 2444 2445 out: 2446 2447 return num_sge; 2448 } 2449 2450 /** 2451 * lpfc_prot_group_type - Get prtotection group type of SCSI command 2452 * @phba: The Hba for which this call is being executed. 2453 * @sc: pointer to scsi command we're working on 2454 * 2455 * Given a SCSI command that supports DIF, determine composition of protection 2456 * groups involved in setting up buffer lists 2457 * 2458 * Returns: Protection group type (with or without DIF) 2459 * 2460 **/ 2461 static int 2462 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc) 2463 { 2464 int ret = LPFC_PG_TYPE_INVALID; 2465 unsigned char op = scsi_get_prot_op(sc); 2466 2467 switch (op) { 2468 case SCSI_PROT_READ_STRIP: 2469 case SCSI_PROT_WRITE_INSERT: 2470 ret = LPFC_PG_TYPE_NO_DIF; 2471 break; 2472 case SCSI_PROT_READ_INSERT: 2473 case SCSI_PROT_WRITE_STRIP: 2474 case SCSI_PROT_READ_PASS: 2475 case SCSI_PROT_WRITE_PASS: 2476 ret = LPFC_PG_TYPE_DIF_BUF; 2477 break; 2478 default: 2479 if (phba) 2480 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2481 "9021 Unsupported protection op:%d\n", 2482 op); 2483 break; 2484 } 2485 return ret; 2486 } 2487 2488 /** 2489 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard 2490 * @phba: The Hba for which this call is being executed. 2491 * @lpfc_cmd: The scsi buffer which is going to be adjusted. 2492 * 2493 * Adjust the data length to account for how much data 2494 * is actually on the wire. 
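 *
 * Illustrative example (assuming 512-byte logical blocks): a 32768-byte
 * WRITE_PASS transfer carries 32768 / 512 = 64 protection tuples of
 * 8 bytes each, so the adjusted length is 32768 + 512 = 33280 bytes.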
2495 *
2496 * returns the adjusted data length
2497 **/
2498 static int
2499 lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
2500 struct lpfc_io_buf *lpfc_cmd)
2501 {
2502 struct scsi_cmnd *sc = lpfc_cmd->pCmd;
2503 int fcpdl;
2504
2505 fcpdl = scsi_bufflen(sc);
2506
2507 /* Check if there is protection data on the wire */
2508 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2509 /* Read check for protection data */
2510 if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
2511 return fcpdl;
2512
2513 } else {
2514 /* Write check for protection data */
2515 if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
2516 return fcpdl;
2517 }
2518
2519 /*
2520 * If we are in DIF Type 1 mode every data block has an 8 byte
2521 * DIF (trailer) attached to it. Must adjust FCP data length
2522 * to account for the protection data.
2523 */
2524 fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
2525
2526 return fcpdl;
2527 }
2528
2529 /**
2530 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
2531 * @phba: The Hba for which this call is being executed.
2532 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
2533 *
2534 * This is the protection/DIF aware version of
2535 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
2536 * two functions eventually, but for now, it's here.
2537 * RETURNS 0 - SUCCESS,
2538 * 1 - Failed DMA map, retry.
2539 * 2 - Invalid scsi cmd or prot-type. Do not retry.
2540 **/
2541 static int
2542 lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2543 struct lpfc_io_buf *lpfc_cmd)
2544 {
2545 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2546 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2547 struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
2548 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2549 uint32_t num_bde = 0;
2550 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2551 int prot_group_type = 0;
2552 int fcpdl;
2553 int ret = 1;
2554 struct lpfc_vport *vport = phba->pport;
2555
2556 /*
2557 * Start the lpfc command prep by bumping the bpl beyond the fcp_cmnd
2558 * and fcp_rsp regions to the first data bde entry
2559 */
2560 bpl += 2;
2561 if (scsi_sg_count(scsi_cmnd)) {
2562 /*
2563 * The driver stores the segment count returned from pci_map_sg
2564 * because this is a count of dma-mappings used to map the use_sg
2565 * pages. They are not guaranteed to be the same for those
2566 * architectures that implement an IOMMU.
2567 */ 2568 datasegcnt = dma_map_sg(&phba->pcidev->dev, 2569 scsi_sglist(scsi_cmnd), 2570 scsi_sg_count(scsi_cmnd), datadir); 2571 if (unlikely(!datasegcnt)) 2572 return 1; 2573 2574 lpfc_cmd->seg_cnt = datasegcnt; 2575 2576 /* First check if data segment count from SCSI Layer is good */ 2577 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 2578 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); 2579 ret = 2; 2580 goto err; 2581 } 2582 2583 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); 2584 2585 switch (prot_group_type) { 2586 case LPFC_PG_TYPE_NO_DIF: 2587 2588 /* Here we need to add a PDE5 and PDE6 to the count */ 2589 if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) { 2590 ret = 2; 2591 goto err; 2592 } 2593 2594 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl, 2595 datasegcnt); 2596 /* we should have 2 or more entries in buffer list */ 2597 if (num_bde < 2) { 2598 ret = 2; 2599 goto err; 2600 } 2601 break; 2602 2603 case LPFC_PG_TYPE_DIF_BUF: 2604 /* 2605 * This type indicates that protection buffers are 2606 * passed to the driver, so that needs to be prepared 2607 * for DMA 2608 */ 2609 protsegcnt = dma_map_sg(&phba->pcidev->dev, 2610 scsi_prot_sglist(scsi_cmnd), 2611 scsi_prot_sg_count(scsi_cmnd), datadir); 2612 if (unlikely(!protsegcnt)) { 2613 scsi_dma_unmap(scsi_cmnd); 2614 return 1; 2615 } 2616 2617 lpfc_cmd->prot_seg_cnt = protsegcnt; 2618 2619 /* 2620 * There is a minimun of 4 BPLs used for every 2621 * protection data segment. 2622 */ 2623 if ((lpfc_cmd->prot_seg_cnt * 4) > 2624 (phba->cfg_total_seg_cnt - 2)) { 2625 ret = 2; 2626 goto err; 2627 } 2628 2629 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl, 2630 datasegcnt, protsegcnt); 2631 /* we should have 3 or more entries in buffer list */ 2632 if ((num_bde < 3) || 2633 (num_bde > phba->cfg_total_seg_cnt)) { 2634 ret = 2; 2635 goto err; 2636 } 2637 break; 2638 2639 case LPFC_PG_TYPE_INVALID: 2640 default: 2641 scsi_dma_unmap(scsi_cmnd); 2642 lpfc_cmd->seg_cnt = 0; 2643 2644 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2645 "9022 Unexpected protection group %i\n", 2646 prot_group_type); 2647 return 2; 2648 } 2649 } 2650 2651 /* 2652 * Finish initializing those IOCB fields that are dependent on the 2653 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly 2654 * reinitialized since all iocb memory resources are used many times 2655 * for transmit, receive, and continuation bpl's. 
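 * The BPL always begins with the fcp_cmnd and fcp_rsp entries, hence
 * the fixed "2 *" term below plus one ulp_bde64 per BDE built above.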
2656 */
2657 iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
2658 iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
2659 iocb_cmd->ulpBdeCount = 1;
2660 iocb_cmd->ulpLe = 1;
2661
2662 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
2663 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
2664
2665 /*
2666 * Due to the difference in data length between DIF/non-DIF paths,
2667 * we need to set word 4 of the IOCB here
2668 */
2669 iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
2670
2671 /*
2672 * For first burst, we may need to adjust the initial transfer
2673 * length for DIF
2674 */
2675 if (iocb_cmd->un.fcpi.fcpi_XRdy &&
2676 (fcpdl < vport->cfg_first_burst_size))
2677 iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
2678
2679 return 0;
2680 err:
2681 if (lpfc_cmd->seg_cnt)
2682 scsi_dma_unmap(scsi_cmnd);
2683 if (lpfc_cmd->prot_seg_cnt)
2684 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
2685 scsi_prot_sg_count(scsi_cmnd),
2686 scsi_cmnd->sc_data_direction);
2687
2688 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2689 "9023 Cannot setup S/G List for HBA "
2690 "IO segs %d/%d BPL %d SCSI %d: %d %d\n",
2691 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
2692 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
2693 prot_group_type, num_bde);
2694
2695 lpfc_cmd->seg_cnt = 0;
2696 lpfc_cmd->prot_seg_cnt = 0;
2697 return ret;
2698 }
2699
2700 /*
2701 * This function calculates the T10 DIF guard tag
2702 * on the specified data with a CRC algorithm,
2703 * using crc_t10dif.
2704 */
2705 static uint16_t
2706 lpfc_bg_crc(uint8_t *data, int count)
2707 {
2708 uint16_t crc = 0;
2709 uint16_t x;
2710
2711 crc = crc_t10dif(data, count);
2712 x = cpu_to_be16(crc);
2713 return x;
2714 }
2715
2716 /*
2717 * This function calculates the T10 DIF guard tag
2718 * on the specified data with a checksum (CSUM) algorithm,
2719 * using ip_compute_csum.
2720 */
2721 static uint16_t
2722 lpfc_bg_csum(uint8_t *data, int count)
2723 {
2724 uint16_t ret;
2725
2726 ret = ip_compute_csum(data, count);
2727 return ret;
2728 }
2729
2730 /*
2731 * This function examines the protection data to try to determine
2732 * what type of T10-DIF error occurred.
2733 */
2734 static void
2735 lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
2736 {
2737 struct scatterlist *sgpe; /* s/g prot entry */
2738 struct scatterlist *sgde; /* s/g data entry */
2739 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2740 struct scsi_dif_tuple *src = NULL;
2741 uint8_t *data_src = NULL;
2742 uint16_t guard_tag;
2743 uint16_t start_app_tag, app_tag;
2744 uint32_t start_ref_tag, ref_tag;
2745 int prot, protsegcnt;
2746 int err_type, len, data_len;
2747 int chk_ref, chk_app, chk_guard;
2748 uint16_t sum;
2749 unsigned blksize;
2750
2751 err_type = BGS_GUARD_ERR_MASK;
2752 sum = 0;
2753 guard_tag = 0;
2754
2755 /* First check to see if there is protection data to examine */
2756 prot = scsi_get_prot_op(cmd);
2757 if ((prot == SCSI_PROT_READ_STRIP) ||
2758 (prot == SCSI_PROT_WRITE_INSERT) ||
2759 (prot == SCSI_PROT_NORMAL))
2760 goto out;
2761
2762 /* Currently the driver just supports ref_tag and guard_tag checking */
2763 chk_ref = 1;
2764 chk_app = 0;
2765 chk_guard = 0;
2766
2767 /* Setup a ptr to the protection data provided by the SCSI host */
2768 sgpe = scsi_prot_sglist(cmd);
2769 protsegcnt = lpfc_cmd->prot_seg_cnt;
2770
2771 if (sgpe && protsegcnt) {
2772
2773 /*
2774 * We will only try to verify guard tag if the segment
2775 * data length is a multiple of the blksize.
2776 */ 2777 sgde = scsi_sglist(cmd); 2778 blksize = lpfc_cmd_blksize(cmd); 2779 data_src = (uint8_t *)sg_virt(sgde); 2780 data_len = sgde->length; 2781 if ((data_len & (blksize - 1)) == 0) 2782 chk_guard = 1; 2783 2784 src = (struct scsi_dif_tuple *)sg_virt(sgpe); 2785 start_ref_tag = t10_pi_ref_tag(cmd->request); 2786 if (start_ref_tag == LPFC_INVALID_REFTAG) 2787 goto out; 2788 start_app_tag = src->app_tag; 2789 len = sgpe->length; 2790 while (src && protsegcnt) { 2791 while (len) { 2792 2793 /* 2794 * First check to see if a protection data 2795 * check is valid 2796 */ 2797 if ((src->ref_tag == T10_PI_REF_ESCAPE) || 2798 (src->app_tag == T10_PI_APP_ESCAPE)) { 2799 start_ref_tag++; 2800 goto skipit; 2801 } 2802 2803 /* First Guard Tag checking */ 2804 if (chk_guard) { 2805 guard_tag = src->guard_tag; 2806 if (lpfc_cmd_guard_csum(cmd)) 2807 sum = lpfc_bg_csum(data_src, 2808 blksize); 2809 else 2810 sum = lpfc_bg_crc(data_src, 2811 blksize); 2812 if ((guard_tag != sum)) { 2813 err_type = BGS_GUARD_ERR_MASK; 2814 goto out; 2815 } 2816 } 2817 2818 /* Reference Tag checking */ 2819 ref_tag = be32_to_cpu(src->ref_tag); 2820 if (chk_ref && (ref_tag != start_ref_tag)) { 2821 err_type = BGS_REFTAG_ERR_MASK; 2822 goto out; 2823 } 2824 start_ref_tag++; 2825 2826 /* App Tag checking */ 2827 app_tag = src->app_tag; 2828 if (chk_app && (app_tag != start_app_tag)) { 2829 err_type = BGS_APPTAG_ERR_MASK; 2830 goto out; 2831 } 2832 skipit: 2833 len -= sizeof(struct scsi_dif_tuple); 2834 if (len < 0) 2835 len = 0; 2836 src++; 2837 2838 data_src += blksize; 2839 data_len -= blksize; 2840 2841 /* 2842 * Are we at the end of the Data segment? 2843 * The data segment is only used for Guard 2844 * tag checking. 2845 */ 2846 if (chk_guard && (data_len == 0)) { 2847 chk_guard = 0; 2848 sgde = sg_next(sgde); 2849 if (!sgde) 2850 goto out; 2851 2852 data_src = (uint8_t *)sg_virt(sgde); 2853 data_len = sgde->length; 2854 if ((data_len & (blksize - 1)) == 0) 2855 chk_guard = 1; 2856 } 2857 } 2858 2859 /* Goto the next Protection data segment */ 2860 sgpe = sg_next(sgpe); 2861 if (sgpe) { 2862 src = (struct scsi_dif_tuple *)sg_virt(sgpe); 2863 len = sgpe->length; 2864 } else { 2865 src = NULL; 2866 } 2867 protsegcnt--; 2868 } 2869 } 2870 out: 2871 if (err_type == BGS_GUARD_ERR_MASK) { 2872 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 2873 0x10, 0x1); 2874 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 | 2875 SAM_STAT_CHECK_CONDITION; 2876 phba->bg_guard_err_cnt++; 2877 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2878 "9069 BLKGRD: reftag %x grd_tag err %x != %x\n", 2879 t10_pi_ref_tag(cmd->request), 2880 sum, guard_tag); 2881 2882 } else if (err_type == BGS_REFTAG_ERR_MASK) { 2883 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 2884 0x10, 0x3); 2885 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 | 2886 SAM_STAT_CHECK_CONDITION; 2887 2888 phba->bg_reftag_err_cnt++; 2889 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2890 "9066 BLKGRD: reftag %x ref_tag err %x != %x\n", 2891 t10_pi_ref_tag(cmd->request), 2892 ref_tag, start_ref_tag); 2893 2894 } else if (err_type == BGS_APPTAG_ERR_MASK) { 2895 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 2896 0x10, 0x2); 2897 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 | 2898 SAM_STAT_CHECK_CONDITION; 2899 2900 phba->bg_apptag_err_cnt++; 2901 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2902 "9041 BLKGRD: reftag %x app_tag err %x != %x\n", 2903 t10_pi_ref_tag(cmd->request), 2904 app_tag, 
start_app_tag); 2905 } 2906 } 2907 2908 /* 2909 * This function checks for BlockGuard errors detected by 2910 * the HBA. In case of errors, the ASC/ASCQ fields in the 2911 * sense buffer will be set accordingly, paired with 2912 * ILLEGAL_REQUEST to signal to the kernel that the HBA 2913 * detected corruption. 2914 * 2915 * Returns: 2916 * 0 - No error found 2917 * 1 - BlockGuard error found 2918 * -1 - Internal error (bad profile, ...etc) 2919 */ 2920 static int 2921 lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, 2922 struct lpfc_wcqe_complete *wcqe) 2923 { 2924 struct scsi_cmnd *cmd = lpfc_cmd->pCmd; 2925 int ret = 0; 2926 u32 status = bf_get(lpfc_wcqe_c_status, wcqe); 2927 u32 bghm = 0; 2928 u32 bgstat = 0; 2929 u64 failing_sector = 0; 2930 2931 if (status == CQE_STATUS_DI_ERROR) { 2932 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */ 2933 bgstat |= BGS_GUARD_ERR_MASK; 2934 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* AppTag Check failed */ 2935 bgstat |= BGS_APPTAG_ERR_MASK; 2936 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* RefTag Check failed */ 2937 bgstat |= BGS_REFTAG_ERR_MASK; 2938 2939 /* Check to see if there was any good data before the error */ 2940 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) { 2941 bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK; 2942 bghm = wcqe->total_data_placed; 2943 } 2944 2945 /* 2946 * Set ALL the error bits to indicate we don't know what 2947 * type of error it is. 2948 */ 2949 if (!bgstat) 2950 bgstat |= (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK | 2951 BGS_GUARD_ERR_MASK); 2952 } 2953 2954 if (lpfc_bgs_get_guard_err(bgstat)) { 2955 ret = 1; 2956 2957 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 2958 0x10, 0x1); 2959 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 | 2960 SAM_STAT_CHECK_CONDITION; 2961 phba->bg_guard_err_cnt++; 2962 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2963 "9059 BLKGRD: Guard Tag error in cmd" 2964 " 0x%x lba 0x%llx blk cnt 0x%x " 2965 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 2966 (unsigned long long)scsi_get_lba(cmd), 2967 blk_rq_sectors(cmd->request), bgstat, bghm); 2968 } 2969 2970 if (lpfc_bgs_get_reftag_err(bgstat)) { 2971 ret = 1; 2972 2973 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 2974 0x10, 0x3); 2975 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 | 2976 SAM_STAT_CHECK_CONDITION; 2977 2978 phba->bg_reftag_err_cnt++; 2979 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2980 "9060 BLKGRD: Ref Tag error in cmd" 2981 " 0x%x lba 0x%llx blk cnt 0x%x " 2982 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 2983 (unsigned long long)scsi_get_lba(cmd), 2984 blk_rq_sectors(cmd->request), bgstat, bghm); 2985 } 2986 2987 if (lpfc_bgs_get_apptag_err(bgstat)) { 2988 ret = 1; 2989 2990 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 2991 0x10, 0x2); 2992 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 | 2993 SAM_STAT_CHECK_CONDITION; 2994 2995 phba->bg_apptag_err_cnt++; 2996 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 2997 "9062 BLKGRD: App Tag error in cmd" 2998 " 0x%x lba 0x%llx blk cnt 0x%x " 2999 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3000 (unsigned long long)scsi_get_lba(cmd), 3001 blk_rq_sectors(cmd->request), bgstat, bghm); 3002 } 3003 3004 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { 3005 /* 3006 * setup sense data descriptor 0 per SPC-4 as an information 3007 * field, and put the failing LBA in it. 3008 * This code assumes there was also a guard/app/ref tag error 3009 * indication. 
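 * Layout written below: byte 7 = additional sense length (0xc),
 * byte 8 = descriptor type 0 (information), byte 9 = additional
 * descriptor length (0xa), byte 10 = VALID bit, and bytes 12-19 =
 * the failing LBA in big-endian form.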
3010 */ 3011 cmd->sense_buffer[7] = 0xc; /* Additional sense length */ 3012 cmd->sense_buffer[8] = 0; /* Information descriptor type */ 3013 cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */ 3014 cmd->sense_buffer[10] = 0x80; /* Validity bit */ 3015 3016 /* bghm is a "on the wire" FC frame based count */ 3017 switch (scsi_get_prot_op(cmd)) { 3018 case SCSI_PROT_READ_INSERT: 3019 case SCSI_PROT_WRITE_STRIP: 3020 bghm /= cmd->device->sector_size; 3021 break; 3022 case SCSI_PROT_READ_STRIP: 3023 case SCSI_PROT_WRITE_INSERT: 3024 case SCSI_PROT_READ_PASS: 3025 case SCSI_PROT_WRITE_PASS: 3026 bghm /= (cmd->device->sector_size + 3027 sizeof(struct scsi_dif_tuple)); 3028 break; 3029 } 3030 3031 failing_sector = scsi_get_lba(cmd); 3032 failing_sector += bghm; 3033 3034 /* Descriptor Information */ 3035 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]); 3036 } 3037 3038 if (!ret) { 3039 /* No error was reported - problem in FW? */ 3040 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3041 "9068 BLKGRD: Unknown error in cmd" 3042 " 0x%x lba 0x%llx blk cnt 0x%x " 3043 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3044 (unsigned long long)scsi_get_lba(cmd), 3045 blk_rq_sectors(cmd->request), bgstat, bghm); 3046 3047 /* Calcuate what type of error it was */ 3048 lpfc_calc_bg_err(phba, lpfc_cmd); 3049 } 3050 return ret; 3051 } 3052 3053 /* 3054 * This function checks for BlockGuard errors detected by 3055 * the HBA. In case of errors, the ASC/ASCQ fields in the 3056 * sense buffer will be set accordingly, paired with 3057 * ILLEGAL_REQUEST to signal to the kernel that the HBA 3058 * detected corruption. 3059 * 3060 * Returns: 3061 * 0 - No error found 3062 * 1 - BlockGuard error found 3063 * -1 - Internal error (bad profile, ...etc) 3064 */ 3065 static int 3066 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, 3067 struct lpfc_iocbq *pIocbOut) 3068 { 3069 struct scsi_cmnd *cmd = lpfc_cmd->pCmd; 3070 struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg; 3071 int ret = 0; 3072 uint32_t bghm = bgf->bghm; 3073 uint32_t bgstat = bgf->bgstat; 3074 uint64_t failing_sector = 0; 3075 3076 if (lpfc_bgs_get_invalid_prof(bgstat)) { 3077 cmd->result = DID_ERROR << 16; 3078 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3079 "9072 BLKGRD: Invalid BG Profile in cmd " 3080 "0x%x reftag 0x%x blk cnt 0x%x " 3081 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3082 t10_pi_ref_tag(cmd->request), 3083 blk_rq_sectors(cmd->request), bgstat, bghm); 3084 ret = (-1); 3085 goto out; 3086 } 3087 3088 if (lpfc_bgs_get_uninit_dif_block(bgstat)) { 3089 cmd->result = DID_ERROR << 16; 3090 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3091 "9073 BLKGRD: Invalid BG PDIF Block in cmd " 3092 "0x%x reftag 0x%x blk cnt 0x%x " 3093 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3094 t10_pi_ref_tag(cmd->request), 3095 blk_rq_sectors(cmd->request), bgstat, bghm); 3096 ret = (-1); 3097 goto out; 3098 } 3099 3100 if (lpfc_bgs_get_guard_err(bgstat)) { 3101 ret = 1; 3102 3103 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 3104 0x10, 0x1); 3105 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 | 3106 SAM_STAT_CHECK_CONDITION; 3107 phba->bg_guard_err_cnt++; 3108 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3109 "9055 BLKGRD: Guard Tag error in cmd " 3110 "0x%x reftag 0x%x blk cnt 0x%x " 3111 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3112 t10_pi_ref_tag(cmd->request), 3113 blk_rq_sectors(cmd->request), bgstat, bghm); 3114 } 3115 3116 if (lpfc_bgs_get_reftag_err(bgstat)) { 3117 ret = 
1; 3118 3119 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 3120 0x10, 0x3); 3121 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 | 3122 SAM_STAT_CHECK_CONDITION; 3123 3124 phba->bg_reftag_err_cnt++; 3125 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3126 "9056 BLKGRD: Ref Tag error in cmd " 3127 "0x%x reftag 0x%x blk cnt 0x%x " 3128 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3129 t10_pi_ref_tag(cmd->request), 3130 blk_rq_sectors(cmd->request), bgstat, bghm); 3131 } 3132 3133 if (lpfc_bgs_get_apptag_err(bgstat)) { 3134 ret = 1; 3135 3136 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 3137 0x10, 0x2); 3138 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 | 3139 SAM_STAT_CHECK_CONDITION; 3140 3141 phba->bg_apptag_err_cnt++; 3142 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3143 "9061 BLKGRD: App Tag error in cmd " 3144 "0x%x reftag 0x%x blk cnt 0x%x " 3145 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3146 t10_pi_ref_tag(cmd->request), 3147 blk_rq_sectors(cmd->request), bgstat, bghm); 3148 } 3149 3150 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { 3151 /* 3152 * setup sense data descriptor 0 per SPC-4 as an information 3153 * field, and put the failing LBA in it. 3154 * This code assumes there was also a guard/app/ref tag error 3155 * indication. 3156 */ 3157 cmd->sense_buffer[7] = 0xc; /* Additional sense length */ 3158 cmd->sense_buffer[8] = 0; /* Information descriptor type */ 3159 cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */ 3160 cmd->sense_buffer[10] = 0x80; /* Validity bit */ 3161 3162 /* bghm is a "on the wire" FC frame based count */ 3163 switch (scsi_get_prot_op(cmd)) { 3164 case SCSI_PROT_READ_INSERT: 3165 case SCSI_PROT_WRITE_STRIP: 3166 bghm /= cmd->device->sector_size; 3167 break; 3168 case SCSI_PROT_READ_STRIP: 3169 case SCSI_PROT_WRITE_INSERT: 3170 case SCSI_PROT_READ_PASS: 3171 case SCSI_PROT_WRITE_PASS: 3172 bghm /= (cmd->device->sector_size + 3173 sizeof(struct scsi_dif_tuple)); 3174 break; 3175 } 3176 3177 failing_sector = scsi_get_lba(cmd); 3178 failing_sector += bghm; 3179 3180 /* Descriptor Information */ 3181 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]); 3182 } 3183 3184 if (!ret) { 3185 /* No error was reported - problem in FW? */ 3186 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, 3187 "9057 BLKGRD: Unknown error in cmd " 3188 "0x%x reftag 0x%x blk cnt 0x%x " 3189 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], 3190 t10_pi_ref_tag(cmd->request), 3191 blk_rq_sectors(cmd->request), bgstat, bghm); 3192 3193 /* Calcuate what type of error it was */ 3194 lpfc_calc_bg_err(phba, lpfc_cmd); 3195 } 3196 out: 3197 return ret; 3198 } 3199 3200 /** 3201 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec 3202 * @phba: The Hba for which this call is being executed. 3203 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3204 * 3205 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd 3206 * field of @lpfc_cmd for device with SLI-4 interface spec. 
3207 * 3208 * Return codes: 3209 * 2 - Error - Do not retry 3210 * 1 - Error - Retry 3211 * 0 - Success 3212 **/ 3213 static int 3214 lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) 3215 { 3216 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 3217 struct scatterlist *sgel = NULL; 3218 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 3219 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; 3220 struct sli4_sge *first_data_sgl; 3221 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; 3222 struct lpfc_vport *vport = phba->pport; 3223 union lpfc_wqe128 *wqe = &pwqeq->wqe; 3224 dma_addr_t physaddr; 3225 uint32_t num_bde = 0; 3226 uint32_t dma_len; 3227 uint32_t dma_offset = 0; 3228 int nseg, i, j; 3229 struct ulp_bde64 *bde; 3230 bool lsp_just_set = false; 3231 struct sli4_hybrid_sgl *sgl_xtra = NULL; 3232 3233 /* 3234 * There are three possibilities here - use scatter-gather segment, use 3235 * the single mapping, or neither. Start the lpfc command prep by 3236 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first 3237 * data bde entry. 3238 */ 3239 if (scsi_sg_count(scsi_cmnd)) { 3240 /* 3241 * The driver stores the segment count returned from pci_map_sg 3242 * because this a count of dma-mappings used to map the use_sg 3243 * pages. They are not guaranteed to be the same for those 3244 * architectures that implement an IOMMU. 3245 */ 3246 3247 nseg = scsi_dma_map(scsi_cmnd); 3248 if (unlikely(nseg <= 0)) 3249 return 1; 3250 sgl += 1; 3251 /* clear the last flag in the fcp_rsp map entry */ 3252 sgl->word2 = le32_to_cpu(sgl->word2); 3253 bf_set(lpfc_sli4_sge_last, sgl, 0); 3254 sgl->word2 = cpu_to_le32(sgl->word2); 3255 sgl += 1; 3256 first_data_sgl = sgl; 3257 lpfc_cmd->seg_cnt = nseg; 3258 if (!phba->cfg_xpsgl && 3259 lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 3260 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3261 "9074 BLKGRD:" 3262 " %s: Too many sg segments from " 3263 "dma_map_sg. Config %d, seg_cnt %d\n", 3264 __func__, phba->cfg_sg_seg_cnt, 3265 lpfc_cmd->seg_cnt); 3266 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); 3267 lpfc_cmd->seg_cnt = 0; 3268 scsi_dma_unmap(scsi_cmnd); 3269 return 2; 3270 } 3271 3272 /* 3273 * The driver established a maximum scatter-gather segment count 3274 * during probe that limits the number of sg elements in any 3275 * single scsi command. Just run through the seg_cnt and format 3276 * the sge's. 3277 * When using SLI-3 the driver will try to fit all the BDEs into 3278 * the IOCB. If it can't then the BDEs get added to a BPL as it 3279 * does for SLI-2 mode. 
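 * For SLI-4, roughly every border_sge_num-th entry below is instead
 * formatted as an LSP (link) SGE that chains to an extra SGL page
 * from lpfc_get_sgl_per_hdwq(), letting the list extend beyond a
 * single SGL page.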
3280 */ 3281 3282 /* for tracking segment boundaries */ 3283 sgel = scsi_sglist(scsi_cmnd); 3284 j = 2; 3285 for (i = 0; i < nseg; i++) { 3286 sgl->word2 = 0; 3287 if ((num_bde + 1) == nseg) { 3288 bf_set(lpfc_sli4_sge_last, sgl, 1); 3289 bf_set(lpfc_sli4_sge_type, sgl, 3290 LPFC_SGE_TYPE_DATA); 3291 } else { 3292 bf_set(lpfc_sli4_sge_last, sgl, 0); 3293 3294 /* do we need to expand the segment */ 3295 if (!lsp_just_set && 3296 !((j + 1) % phba->border_sge_num) && 3297 ((nseg - 1) != i)) { 3298 /* set LSP type */ 3299 bf_set(lpfc_sli4_sge_type, sgl, 3300 LPFC_SGE_TYPE_LSP); 3301 3302 sgl_xtra = lpfc_get_sgl_per_hdwq( 3303 phba, lpfc_cmd); 3304 3305 if (unlikely(!sgl_xtra)) { 3306 lpfc_cmd->seg_cnt = 0; 3307 scsi_dma_unmap(scsi_cmnd); 3308 return 1; 3309 } 3310 sgl->addr_lo = cpu_to_le32(putPaddrLow( 3311 sgl_xtra->dma_phys_sgl)); 3312 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 3313 sgl_xtra->dma_phys_sgl)); 3314 3315 } else { 3316 bf_set(lpfc_sli4_sge_type, sgl, 3317 LPFC_SGE_TYPE_DATA); 3318 } 3319 } 3320 3321 if (!(bf_get(lpfc_sli4_sge_type, sgl) & 3322 LPFC_SGE_TYPE_LSP)) { 3323 if ((nseg - 1) == i) 3324 bf_set(lpfc_sli4_sge_last, sgl, 1); 3325 3326 physaddr = sg_dma_address(sgel); 3327 dma_len = sg_dma_len(sgel); 3328 sgl->addr_lo = cpu_to_le32(putPaddrLow( 3329 physaddr)); 3330 sgl->addr_hi = cpu_to_le32(putPaddrHigh( 3331 physaddr)); 3332 3333 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 3334 sgl->word2 = cpu_to_le32(sgl->word2); 3335 sgl->sge_len = cpu_to_le32(dma_len); 3336 3337 dma_offset += dma_len; 3338 sgel = sg_next(sgel); 3339 3340 sgl++; 3341 lsp_just_set = false; 3342 3343 } else { 3344 sgl->word2 = cpu_to_le32(sgl->word2); 3345 sgl->sge_len = cpu_to_le32( 3346 phba->cfg_sg_dma_buf_size); 3347 3348 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; 3349 i = i - 1; 3350 3351 lsp_just_set = true; 3352 } 3353 3354 j++; 3355 } 3356 /* 3357 * Setup the first Payload BDE. For FCoE we just key off 3358 * Performance Hints, for FC we use lpfc_enable_pbde. 3359 * We populate words 13-15 of IOCB/WQE. 3360 */ 3361 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) || 3362 phba->cfg_enable_pbde) { 3363 bde = (struct ulp_bde64 *) 3364 &wqe->words[13]; 3365 bde->addrLow = first_data_sgl->addr_lo; 3366 bde->addrHigh = first_data_sgl->addr_hi; 3367 bde->tus.f.bdeSize = 3368 le32_to_cpu(first_data_sgl->sge_len); 3369 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 3370 bde->tus.w = cpu_to_le32(bde->tus.w); 3371 3372 } else { 3373 memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3)); 3374 } 3375 } else { 3376 sgl += 1; 3377 /* clear the last flag in the fcp_rsp map entry */ 3378 sgl->word2 = le32_to_cpu(sgl->word2); 3379 bf_set(lpfc_sli4_sge_last, sgl, 1); 3380 sgl->word2 = cpu_to_le32(sgl->word2); 3381 3382 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) || 3383 phba->cfg_enable_pbde) { 3384 bde = (struct ulp_bde64 *) 3385 &wqe->words[13]; 3386 memset(bde, 0, (sizeof(uint32_t) * 3)); 3387 } 3388 } 3389 3390 /* Word 11 */ 3391 if (phba->cfg_enable_pbde) 3392 bf_set(wqe_pbde, &wqe->generic.wqe_com, 1); 3393 3394 /* 3395 * Finish initializing those IOCB fields that are dependent on the 3396 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is 3397 * explicitly reinitialized. 3398 * all iocb memory resources are reused. 
3399 */ 3400 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); 3401 /* Set first-burst provided it was successfully negotiated */ 3402 if (!(phba->hba_flag & HBA_FCOE_MODE) && 3403 vport->cfg_first_burst_size && 3404 scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) { 3405 u32 init_len, total_len; 3406 3407 total_len = be32_to_cpu(fcp_cmnd->fcpDl); 3408 init_len = min(total_len, vport->cfg_first_burst_size); 3409 3410 /* Word 4 & 5 */ 3411 wqe->fcp_iwrite.initial_xfer_len = init_len; 3412 wqe->fcp_iwrite.total_xfer_len = total_len; 3413 } else { 3414 /* Word 4 */ 3415 wqe->fcp_iwrite.total_xfer_len = 3416 be32_to_cpu(fcp_cmnd->fcpDl); 3417 } 3418 3419 /* 3420 * If the OAS driver feature is enabled and the lun is enabled for 3421 * OAS, set the oas iocb related flags. 3422 */ 3423 if ((phba->cfg_fof) && ((struct lpfc_device_data *) 3424 scsi_cmnd->device->hostdata)->oas_enabled) { 3425 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF); 3426 lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *) 3427 scsi_cmnd->device->hostdata)->priority; 3428 3429 /* Word 10 */ 3430 bf_set(wqe_oas, &wqe->generic.wqe_com, 1); 3431 bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1); 3432 3433 if (lpfc_cmd->cur_iocbq.priority) 3434 bf_set(wqe_ccp, &wqe->generic.wqe_com, 3435 (lpfc_cmd->cur_iocbq.priority << 1)); 3436 else 3437 bf_set(wqe_ccp, &wqe->generic.wqe_com, 3438 (phba->cfg_XLanePriority << 1)); 3439 } 3440 3441 return 0; 3442 } 3443 3444 /** 3445 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec 3446 * @phba: The Hba for which this call is being executed. 3447 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3448 * 3449 * This is the protection/DIF aware version of 3450 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the 3451 * two functions eventually, but for now, it's here 3452 * Return codes: 3453 * 2 - Error - Do not retry 3454 * 1 - Error - Retry 3455 * 0 - Success 3456 **/ 3457 static int 3458 lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, 3459 struct lpfc_io_buf *lpfc_cmd) 3460 { 3461 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 3462 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 3463 struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl); 3464 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; 3465 union lpfc_wqe128 *wqe = &pwqeq->wqe; 3466 uint32_t num_sge = 0; 3467 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; 3468 int prot_group_type = 0; 3469 int fcpdl; 3470 int ret = 1; 3471 struct lpfc_vport *vport = phba->pport; 3472 3473 /* 3474 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd 3475 * fcp_rsp regions to the first data sge entry 3476 */ 3477 if (scsi_sg_count(scsi_cmnd)) { 3478 /* 3479 * The driver stores the segment count returned from pci_map_sg 3480 * because this a count of dma-mappings used to map the use_sg 3481 * pages. They are not guaranteed to be the same for those 3482 * architectures that implement an IOMMU. 
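 * For LPFC_PG_TYPE_DIF_BUF the protection scatterlist is mapped
 * separately below, and both mappings are undone on the error path.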
3483 */ 3484 datasegcnt = dma_map_sg(&phba->pcidev->dev, 3485 scsi_sglist(scsi_cmnd), 3486 scsi_sg_count(scsi_cmnd), datadir); 3487 if (unlikely(!datasegcnt)) 3488 return 1; 3489 3490 sgl += 1; 3491 /* clear the last flag in the fcp_rsp map entry */ 3492 sgl->word2 = le32_to_cpu(sgl->word2); 3493 bf_set(lpfc_sli4_sge_last, sgl, 0); 3494 sgl->word2 = cpu_to_le32(sgl->word2); 3495 3496 sgl += 1; 3497 lpfc_cmd->seg_cnt = datasegcnt; 3498 3499 /* First check if data segment count from SCSI Layer is good */ 3500 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt && 3501 !phba->cfg_xpsgl) { 3502 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); 3503 ret = 2; 3504 goto err; 3505 } 3506 3507 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); 3508 3509 switch (prot_group_type) { 3510 case LPFC_PG_TYPE_NO_DIF: 3511 /* Here we need to add a DISEED to the count */ 3512 if (((lpfc_cmd->seg_cnt + 1) > 3513 phba->cfg_total_seg_cnt) && 3514 !phba->cfg_xpsgl) { 3515 ret = 2; 3516 goto err; 3517 } 3518 3519 num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl, 3520 datasegcnt, lpfc_cmd); 3521 3522 /* we should have 2 or more entries in buffer list */ 3523 if (num_sge < 2) { 3524 ret = 2; 3525 goto err; 3526 } 3527 break; 3528 3529 case LPFC_PG_TYPE_DIF_BUF: 3530 /* 3531 * This type indicates that protection buffers are 3532 * passed to the driver, so that needs to be prepared 3533 * for DMA 3534 */ 3535 protsegcnt = dma_map_sg(&phba->pcidev->dev, 3536 scsi_prot_sglist(scsi_cmnd), 3537 scsi_prot_sg_count(scsi_cmnd), datadir); 3538 if (unlikely(!protsegcnt)) { 3539 scsi_dma_unmap(scsi_cmnd); 3540 return 1; 3541 } 3542 3543 lpfc_cmd->prot_seg_cnt = protsegcnt; 3544 /* 3545 * There is a minimun of 3 SGEs used for every 3546 * protection data segment. 3547 */ 3548 if (((lpfc_cmd->prot_seg_cnt * 3) > 3549 (phba->cfg_total_seg_cnt - 2)) && 3550 !phba->cfg_xpsgl) { 3551 ret = 2; 3552 goto err; 3553 } 3554 3555 num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl, 3556 datasegcnt, protsegcnt, lpfc_cmd); 3557 3558 /* we should have 3 or more entries in buffer list */ 3559 if (num_sge < 3 || 3560 (num_sge > phba->cfg_total_seg_cnt && 3561 !phba->cfg_xpsgl)) { 3562 ret = 2; 3563 goto err; 3564 } 3565 break; 3566 3567 case LPFC_PG_TYPE_INVALID: 3568 default: 3569 scsi_dma_unmap(scsi_cmnd); 3570 lpfc_cmd->seg_cnt = 0; 3571 3572 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3573 "9083 Unexpected protection group %i\n", 3574 prot_group_type); 3575 return 2; 3576 } 3577 } 3578 3579 switch (scsi_get_prot_op(scsi_cmnd)) { 3580 case SCSI_PROT_WRITE_STRIP: 3581 case SCSI_PROT_READ_STRIP: 3582 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP; 3583 break; 3584 case SCSI_PROT_WRITE_INSERT: 3585 case SCSI_PROT_READ_INSERT: 3586 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT; 3587 break; 3588 case SCSI_PROT_WRITE_PASS: 3589 case SCSI_PROT_READ_PASS: 3590 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS; 3591 break; 3592 } 3593 3594 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); 3595 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); 3596 3597 /* Set first-burst provided it was successfully negotiated */ 3598 if (!(phba->hba_flag & HBA_FCOE_MODE) && 3599 vport->cfg_first_burst_size && 3600 scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) { 3601 u32 init_len, total_len; 3602 3603 total_len = be32_to_cpu(fcp_cmnd->fcpDl); 3604 init_len = min(total_len, vport->cfg_first_burst_size); 3605 3606 /* Word 4 & 5 */ 3607 wqe->fcp_iwrite.initial_xfer_len = init_len; 3608 wqe->fcp_iwrite.total_xfer_len = total_len; 3609 } else { 3610 /* Word 4 
*/ 3611 wqe->fcp_iwrite.total_xfer_len = 3612 be32_to_cpu(fcp_cmnd->fcpDl); 3613 } 3614 3615 /* 3616 * If the OAS driver feature is enabled and the lun is enabled for 3617 * OAS, set the oas iocb related flags. 3618 */ 3619 if ((phba->cfg_fof) && ((struct lpfc_device_data *) 3620 scsi_cmnd->device->hostdata)->oas_enabled) { 3621 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF); 3622 3623 /* Word 10 */ 3624 bf_set(wqe_oas, &wqe->generic.wqe_com, 1); 3625 bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1); 3626 bf_set(wqe_ccp, &wqe->generic.wqe_com, 3627 (phba->cfg_XLanePriority << 1)); 3628 } 3629 3630 /* Word 7. DIF Flags */ 3631 if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_PASS) 3632 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU); 3633 else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_STRIP) 3634 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP); 3635 else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_INSERT) 3636 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT); 3637 3638 lpfc_cmd->cur_iocbq.iocb_flag &= ~(LPFC_IO_DIF_PASS | 3639 LPFC_IO_DIF_STRIP | LPFC_IO_DIF_INSERT); 3640 3641 return 0; 3642 err: 3643 if (lpfc_cmd->seg_cnt) 3644 scsi_dma_unmap(scsi_cmnd); 3645 if (lpfc_cmd->prot_seg_cnt) 3646 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), 3647 scsi_prot_sg_count(scsi_cmnd), 3648 scsi_cmnd->sc_data_direction); 3649 3650 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3651 "9084 Cannot setup S/G List for HBA" 3652 "IO segs %d/%d SGL %d SCSI %d: %d %d\n", 3653 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, 3654 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, 3655 prot_group_type, num_sge); 3656 3657 lpfc_cmd->seg_cnt = 0; 3658 lpfc_cmd->prot_seg_cnt = 0; 3659 return ret; 3660 } 3661 3662 /** 3663 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer 3664 * @phba: The Hba for which this call is being executed. 3665 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3666 * 3667 * This routine wraps the actual DMA mapping function pointer from the 3668 * lpfc_hba struct. 3669 * 3670 * Return codes: 3671 * 1 - Error 3672 * 0 - Success 3673 **/ 3674 static inline int 3675 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) 3676 { 3677 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); 3678 } 3679 3680 /** 3681 * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer 3682 * using BlockGuard. 3683 * @phba: The Hba for which this call is being executed. 3684 * @lpfc_cmd: The scsi buffer which is going to be mapped. 3685 * 3686 * This routine wraps the actual DMA mapping function pointer from the 3687 * lpfc_hba struct. 3688 * 3689 * Return codes: 3690 * 1 - Error 3691 * 0 - Success 3692 **/ 3693 static inline int 3694 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) 3695 { 3696 return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 3697 } 3698 3699 /** 3700 * lpfc_scsi_prep_cmnd_buf - Wrapper function for IOCB/WQE mapping of scsi 3701 * buffer 3702 * @vport: Pointer to vport object. 3703 * @lpfc_cmd: The scsi buffer which is going to be mapped. 
3704 * @tmo: Timeout value for IO 3705 * 3706 * This routine initializes IOCB/WQE data structure from scsi command 3707 * 3708 * Return codes: 3709 * 1 - Error 3710 * 0 - Success 3711 **/ 3712 static inline int 3713 lpfc_scsi_prep_cmnd_buf(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, 3714 uint8_t tmo) 3715 { 3716 return vport->phba->lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, tmo); 3717 } 3718 3719 /** 3720 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error 3721 * @phba: Pointer to hba context object. 3722 * @vport: Pointer to vport object. 3723 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error. 3724 * @fcpi_parm: FCP Initiator parameter. 3725 * 3726 * This function posts an event when there is a SCSI command reporting 3727 * error from the scsi device. 3728 **/ 3729 static void 3730 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport, 3731 struct lpfc_io_buf *lpfc_cmd, uint32_t fcpi_parm) { 3732 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; 3733 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; 3734 uint32_t resp_info = fcprsp->rspStatus2; 3735 uint32_t scsi_status = fcprsp->rspStatus3; 3736 struct lpfc_fast_path_event *fast_path_evt = NULL; 3737 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode; 3738 unsigned long flags; 3739 3740 if (!pnode) 3741 return; 3742 3743 /* If there is queuefull or busy condition send a scsi event */ 3744 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) || 3745 (cmnd->result == SAM_STAT_BUSY)) { 3746 fast_path_evt = lpfc_alloc_fast_evt(phba); 3747 if (!fast_path_evt) 3748 return; 3749 fast_path_evt->un.scsi_evt.event_type = 3750 FC_REG_SCSI_EVENT; 3751 fast_path_evt->un.scsi_evt.subcategory = 3752 (cmnd->result == SAM_STAT_TASK_SET_FULL) ? 3753 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY; 3754 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun; 3755 memcpy(&fast_path_evt->un.scsi_evt.wwpn, 3756 &pnode->nlp_portname, sizeof(struct lpfc_name)); 3757 memcpy(&fast_path_evt->un.scsi_evt.wwnn, 3758 &pnode->nlp_nodename, sizeof(struct lpfc_name)); 3759 } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen && 3760 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) { 3761 fast_path_evt = lpfc_alloc_fast_evt(phba); 3762 if (!fast_path_evt) 3763 return; 3764 fast_path_evt->un.check_cond_evt.scsi_event.event_type = 3765 FC_REG_SCSI_EVENT; 3766 fast_path_evt->un.check_cond_evt.scsi_event.subcategory = 3767 LPFC_EVENT_CHECK_COND; 3768 fast_path_evt->un.check_cond_evt.scsi_event.lun = 3769 cmnd->device->lun; 3770 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn, 3771 &pnode->nlp_portname, sizeof(struct lpfc_name)); 3772 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn, 3773 &pnode->nlp_nodename, sizeof(struct lpfc_name)); 3774 fast_path_evt->un.check_cond_evt.sense_key = 3775 cmnd->sense_buffer[2] & 0xf; 3776 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12]; 3777 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13]; 3778 } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) && 3779 fcpi_parm && 3780 ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) || 3781 ((scsi_status == SAM_STAT_GOOD) && 3782 !(resp_info & (RESID_UNDER | RESID_OVER))))) { 3783 /* 3784 * If status is good or resid does not match with fcp_param and 3785 * there is valid fcpi_parm, then there is a read_check error 3786 */ 3787 fast_path_evt = lpfc_alloc_fast_evt(phba); 3788 if (!fast_path_evt) 3789 return; 3790 fast_path_evt->un.read_check_error.header.event_type = 3791 FC_REG_FABRIC_EVENT; 3792 
fast_path_evt->un.read_check_error.header.subcategory = 3793 LPFC_EVENT_FCPRDCHKERR; 3794 memcpy(&fast_path_evt->un.read_check_error.header.wwpn, 3795 &pnode->nlp_portname, sizeof(struct lpfc_name)); 3796 memcpy(&fast_path_evt->un.read_check_error.header.wwnn, 3797 &pnode->nlp_nodename, sizeof(struct lpfc_name)); 3798 fast_path_evt->un.read_check_error.lun = cmnd->device->lun; 3799 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0]; 3800 fast_path_evt->un.read_check_error.fcpiparam = 3801 fcpi_parm; 3802 } else 3803 return; 3804 3805 fast_path_evt->vport = vport; 3806 spin_lock_irqsave(&phba->hbalock, flags); 3807 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list); 3808 spin_unlock_irqrestore(&phba->hbalock, flags); 3809 lpfc_worker_wake_up(phba); 3810 return; 3811 } 3812 3813 /** 3814 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev 3815 * @phba: The HBA for which this call is being executed. 3816 * @psb: The scsi buffer which is going to be un-mapped. 3817 * 3818 * This routine does DMA un-mapping of scatter gather list of scsi command 3819 * field of @lpfc_cmd for device with SLI-3 interface spec. 3820 **/ 3821 static void 3822 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb) 3823 { 3824 /* 3825 * There are only two special cases to consider. (1) the scsi command 3826 * requested scatter-gather usage or (2) the scsi command allocated 3827 * a request buffer, but did not request use_sg. There is a third 3828 * case, but it does not require resource deallocation. 3829 */ 3830 if (psb->seg_cnt > 0) 3831 scsi_dma_unmap(psb->pCmd); 3832 if (psb->prot_seg_cnt > 0) 3833 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd), 3834 scsi_prot_sg_count(psb->pCmd), 3835 psb->pCmd->sc_data_direction); 3836 } 3837 3838 /** 3839 * lpfc_handle_fcp_err - FCP response handler 3840 * @vport: The virtual port for which this call is being executed. 3841 * @lpfc_cmd: Pointer to lpfc_io_buf data structure. 3842 * @fcpi_parm: FCP Initiator parameter. 3843 * 3844 * This routine is called to process response IOCB with status field 3845 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command 3846 * based upon SCSI and FCP error. 3847 **/ 3848 static void 3849 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, 3850 uint32_t fcpi_parm) 3851 { 3852 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; 3853 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd; 3854 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; 3855 uint32_t resp_info = fcprsp->rspStatus2; 3856 uint32_t scsi_status = fcprsp->rspStatus3; 3857 uint32_t *lp; 3858 uint32_t host_status = DID_OK; 3859 uint32_t rsplen = 0; 3860 uint32_t fcpDl; 3861 uint32_t logit = LOG_FCP | LOG_FCP_ERROR; 3862 3863 3864 /* 3865 * If this is a task management command, there is no 3866 * scsi packet associated with this lpfc_cmd. The driver 3867 * consumes it. 
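 * (fcpCntl2 carries the FCP task management flags; a non-zero value
 *  means this was a TMF such as a LUN or target reset rather than a
 *  normal CDB, so there is no midlayer scsi_cmnd status to fill in.)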
3868 */ 3869 if (fcpcmd->fcpCntl2) { 3870 scsi_status = 0; 3871 goto out; 3872 } 3873 3874 if (resp_info & RSP_LEN_VALID) { 3875 rsplen = be32_to_cpu(fcprsp->rspRspLen); 3876 if (rsplen != 0 && rsplen != 4 && rsplen != 8) { 3877 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3878 "2719 Invalid response length: " 3879 "tgt x%x lun x%llx cmnd x%x rsplen " 3880 "x%x\n", cmnd->device->id, 3881 cmnd->device->lun, cmnd->cmnd[0], 3882 rsplen); 3883 host_status = DID_ERROR; 3884 goto out; 3885 } 3886 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) { 3887 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3888 "2757 Protocol failure detected during " 3889 "processing of FCP I/O op: " 3890 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n", 3891 cmnd->device->id, 3892 cmnd->device->lun, cmnd->cmnd[0], 3893 fcprsp->rspInfo3); 3894 host_status = DID_ERROR; 3895 goto out; 3896 } 3897 } 3898 3899 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) { 3900 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen); 3901 if (snslen > SCSI_SENSE_BUFFERSIZE) 3902 snslen = SCSI_SENSE_BUFFERSIZE; 3903 3904 if (resp_info & RSP_LEN_VALID) 3905 rsplen = be32_to_cpu(fcprsp->rspRspLen); 3906 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen); 3907 } 3908 lp = (uint32_t *)cmnd->sense_buffer; 3909 3910 /* special handling for under run conditions */ 3911 if (!scsi_status && (resp_info & RESID_UNDER)) { 3912 /* don't log under runs if fcp set... */ 3913 if (vport->cfg_log_verbose & LOG_FCP) 3914 logit = LOG_FCP_ERROR; 3915 /* unless operator says so */ 3916 if (vport->cfg_log_verbose & LOG_FCP_UNDER) 3917 logit = LOG_FCP_UNDER; 3918 } 3919 3920 lpfc_printf_vlog(vport, KERN_WARNING, logit, 3921 "9024 FCP command x%x failed: x%x SNS x%x x%x " 3922 "Data: x%x x%x x%x x%x x%x\n", 3923 cmnd->cmnd[0], scsi_status, 3924 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info, 3925 be32_to_cpu(fcprsp->rspResId), 3926 be32_to_cpu(fcprsp->rspSnsLen), 3927 be32_to_cpu(fcprsp->rspRspLen), 3928 fcprsp->rspInfo3); 3929 3930 scsi_set_resid(cmnd, 0); 3931 fcpDl = be32_to_cpu(fcpcmd->fcpDl); 3932 if (resp_info & RESID_UNDER) { 3933 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId)); 3934 3935 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER, 3936 "9025 FCP Underrun, expected %d, " 3937 "residual %d Data: x%x x%x x%x\n", 3938 fcpDl, 3939 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0], 3940 cmnd->underflow); 3941 3942 /* 3943 * If there is an under run, check if under run reported by 3944 * storage array is same as the under run reported by HBA. 3945 * If this is not same, there is a dropped frame. 3946 */ 3947 if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) { 3948 lpfc_printf_vlog(vport, KERN_WARNING, 3949 LOG_FCP | LOG_FCP_ERROR, 3950 "9026 FCP Read Check Error " 3951 "and Underrun Data: x%x x%x x%x x%x\n", 3952 fcpDl, 3953 scsi_get_resid(cmnd), fcpi_parm, 3954 cmnd->cmnd[0]); 3955 scsi_set_resid(cmnd, scsi_bufflen(cmnd)); 3956 host_status = DID_ERROR; 3957 } 3958 /* 3959 * The cmnd->underflow is the minimum number of bytes that must 3960 * be transferred for this command. Provided a sense condition 3961 * is not present, make sure the actual amount transferred is at 3962 * least the underflow value or fail. 
3963 */ 3964 if (!(resp_info & SNS_LEN_VALID) && 3965 (scsi_status == SAM_STAT_GOOD) && 3966 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) 3967 < cmnd->underflow)) { 3968 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 3969 "9027 FCP command x%x residual " 3970 "underrun converted to error " 3971 "Data: x%x x%x x%x\n", 3972 cmnd->cmnd[0], scsi_bufflen(cmnd), 3973 scsi_get_resid(cmnd), cmnd->underflow); 3974 host_status = DID_ERROR; 3975 } 3976 } else if (resp_info & RESID_OVER) { 3977 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 3978 "9028 FCP command x%x residual overrun error. " 3979 "Data: x%x x%x\n", cmnd->cmnd[0], 3980 scsi_bufflen(cmnd), scsi_get_resid(cmnd)); 3981 host_status = DID_ERROR; 3982 3983 /* 3984 * Check SLI validation that all the transfer was actually done 3985 * (fcpi_parm should be zero). Apply check only to reads. 3986 */ 3987 } else if (fcpi_parm) { 3988 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, 3989 "9029 FCP %s Check Error Data: " 3990 "x%x x%x x%x x%x x%x\n", 3991 ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ? 3992 "Read" : "Write"), 3993 fcpDl, be32_to_cpu(fcprsp->rspResId), 3994 fcpi_parm, cmnd->cmnd[0], scsi_status); 3995 3996 /* There is some issue with the LPe12000 that causes it 3997 * to miscalculate the fcpi_parm and falsely trip this 3998 * recovery logic. Detect this case and don't error when true. 3999 */ 4000 if (fcpi_parm > fcpDl) 4001 goto out; 4002 4003 switch (scsi_status) { 4004 case SAM_STAT_GOOD: 4005 case SAM_STAT_CHECK_CONDITION: 4006 /* Fabric dropped a data frame. Fail any successful 4007 * command in which we detected dropped frames. 4008 * A status of good or some check conditions could 4009 * be considered a successful command. 4010 */ 4011 host_status = DID_ERROR; 4012 break; 4013 } 4014 scsi_set_resid(cmnd, scsi_bufflen(cmnd)); 4015 } 4016 4017 out: 4018 cmnd->result = host_status << 16 | scsi_status; 4019 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, fcpi_parm); 4020 } 4021 4022 /** 4023 * lpfc_fcp_io_cmd_wqe_cmpl - Complete a FCP IO 4024 * @phba: The hba for which this call is being executed. 4025 * @pwqeIn: The command WQE for the scsi cmnd. 4026 * @wcqe: Pointer to driver response CQE object. 4027 * 4028 * This routine assigns scsi command result by looking into response WQE 4029 * status field appropriately. This routine handles QUEUE FULL condition as 4030 * well by ramping down device queue depth. 4031 **/ 4032 static void 4033 lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, 4034 struct lpfc_wcqe_complete *wcqe) 4035 { 4036 struct lpfc_io_buf *lpfc_cmd = 4037 (struct lpfc_io_buf *)pwqeIn->context1; 4038 struct lpfc_vport *vport = pwqeIn->vport; 4039 struct lpfc_rport_data *rdata; 4040 struct lpfc_nodelist *ndlp; 4041 struct scsi_cmnd *cmd; 4042 unsigned long flags; 4043 struct lpfc_fast_path_event *fast_path_evt; 4044 struct Scsi_Host *shost; 4045 u32 logit = LOG_FCP; 4046 u32 status, idx; 4047 unsigned long iflags = 0; 4048 4049 /* Sanity check on return of outstanding command */ 4050 if (!lpfc_cmd) { 4051 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4052 "9032 Null lpfc_cmd pointer. No " 4053 "release, skip completion\n"); 4054 return; 4055 } 4056 4057 rdata = lpfc_cmd->rdata; 4058 ndlp = rdata->pnode; 4059 4060 if (bf_get(lpfc_wcqe_c_xb, wcqe)) { 4061 /* TOREMOVE - currently this flag is checked during 4062 * the release of lpfc_iocbq. Remove once we move 4063 * to lpfc_wqe_job construct. 
4064 * 4065 * This needs to be done outside buf_lock 4066 */ 4067 spin_lock_irqsave(&phba->hbalock, iflags); 4068 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_EXCHANGE_BUSY; 4069 spin_unlock_irqrestore(&phba->hbalock, iflags); 4070 } 4071 4072 /* Guard against abort handler being called at same time */ 4073 spin_lock(&lpfc_cmd->buf_lock); 4074 4075 /* Sanity check on return of outstanding command */ 4076 cmd = lpfc_cmd->pCmd; 4077 if (!cmd) { 4078 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4079 "9042 I/O completion: Not an active IO\n"); 4080 spin_unlock(&lpfc_cmd->buf_lock); 4081 lpfc_release_scsi_buf(phba, lpfc_cmd); 4082 return; 4083 } 4084 idx = lpfc_cmd->cur_iocbq.hba_wqidx; 4085 if (phba->sli4_hba.hdwq) 4086 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++; 4087 4088 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4089 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) 4090 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); 4091 #endif 4092 shost = cmd->device->host; 4093 4094 status = bf_get(lpfc_wcqe_c_status, wcqe); 4095 lpfc_cmd->status = (status & LPFC_IOCB_STATUS_MASK); 4096 lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK); 4097 4098 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; 4099 if (bf_get(lpfc_wcqe_c_xb, wcqe)) 4100 lpfc_cmd->flags |= LPFC_SBUF_XBUSY; 4101 4102 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4103 if (lpfc_cmd->prot_data_type) { 4104 struct scsi_dif_tuple *src = NULL; 4105 4106 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment; 4107 /* 4108 * Used to restore any changes to protection 4109 * data for error injection. 4110 */ 4111 switch (lpfc_cmd->prot_data_type) { 4112 case LPFC_INJERR_REFTAG: 4113 src->ref_tag = 4114 lpfc_cmd->prot_data; 4115 break; 4116 case LPFC_INJERR_APPTAG: 4117 src->app_tag = 4118 (uint16_t)lpfc_cmd->prot_data; 4119 break; 4120 case LPFC_INJERR_GUARD: 4121 src->guard_tag = 4122 (uint16_t)lpfc_cmd->prot_data; 4123 break; 4124 default: 4125 break; 4126 } 4127 4128 lpfc_cmd->prot_data = 0; 4129 lpfc_cmd->prot_data_type = 0; 4130 lpfc_cmd->prot_data_segment = NULL; 4131 } 4132 #endif 4133 if (unlikely(lpfc_cmd->status)) { 4134 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && 4135 (lpfc_cmd->result & IOERR_DRVR_MASK)) 4136 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 4137 else if (lpfc_cmd->status >= IOSTAT_CNT) 4138 lpfc_cmd->status = IOSTAT_DEFAULT; 4139 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR && 4140 !lpfc_cmd->fcp_rsp->rspStatus3 && 4141 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) && 4142 !(vport->cfg_log_verbose & LOG_FCP_UNDER)) 4143 logit = 0; 4144 else 4145 logit = LOG_FCP | LOG_FCP_UNDER; 4146 lpfc_printf_vlog(vport, KERN_WARNING, logit, 4147 "9034 FCP cmd x%x failed <%d/%lld> " 4148 "status: x%x result: x%x " 4149 "sid: x%x did: x%x oxid: x%x " 4150 "Data: x%x x%x x%x\n", 4151 cmd->cmnd[0], 4152 cmd->device ? cmd->device->id : 0xffff, 4153 cmd->device ? cmd->device->lun : 0xffff, 4154 lpfc_cmd->status, lpfc_cmd->result, 4155 vport->fc_myDID, 4156 (ndlp) ? 
ndlp->nlp_DID : 0, 4157 lpfc_cmd->cur_iocbq.sli4_xritag, 4158 wcqe->parameter, wcqe->total_data_placed, 4159 lpfc_cmd->cur_iocbq.iotag); 4160 } 4161 4162 switch (lpfc_cmd->status) { 4163 case IOSTAT_SUCCESS: 4164 cmd->result = DID_OK << 16; 4165 break; 4166 case IOSTAT_FCP_RSP_ERROR: 4167 lpfc_handle_fcp_err(vport, lpfc_cmd, 4168 pwqeIn->wqe.fcp_iread.total_xfer_len - 4169 wcqe->total_data_placed); 4170 break; 4171 case IOSTAT_NPORT_BSY: 4172 case IOSTAT_FABRIC_BSY: 4173 cmd->result = DID_TRANSPORT_DISRUPTED << 16; 4174 fast_path_evt = lpfc_alloc_fast_evt(phba); 4175 if (!fast_path_evt) 4176 break; 4177 fast_path_evt->un.fabric_evt.event_type = 4178 FC_REG_FABRIC_EVENT; 4179 fast_path_evt->un.fabric_evt.subcategory = 4180 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ? 4181 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY; 4182 if (ndlp) { 4183 memcpy(&fast_path_evt->un.fabric_evt.wwpn, 4184 &ndlp->nlp_portname, 4185 sizeof(struct lpfc_name)); 4186 memcpy(&fast_path_evt->un.fabric_evt.wwnn, 4187 &ndlp->nlp_nodename, 4188 sizeof(struct lpfc_name)); 4189 } 4190 fast_path_evt->vport = vport; 4191 fast_path_evt->work_evt.evt = 4192 LPFC_EVT_FASTPATH_MGMT_EVT; 4193 spin_lock_irqsave(&phba->hbalock, flags); 4194 list_add_tail(&fast_path_evt->work_evt.evt_listp, 4195 &phba->work_list); 4196 spin_unlock_irqrestore(&phba->hbalock, flags); 4197 lpfc_worker_wake_up(phba); 4198 lpfc_printf_vlog(vport, KERN_WARNING, logit, 4199 "9035 Fabric/Node busy FCP cmd x%x failed" 4200 " <%d/%lld> " 4201 "status: x%x result: x%x " 4202 "sid: x%x did: x%x oxid: x%x " 4203 "Data: x%x x%x x%x\n", 4204 cmd->cmnd[0], 4205 cmd->device ? cmd->device->id : 0xffff, 4206 cmd->device ? cmd->device->lun : 0xffff, 4207 lpfc_cmd->status, lpfc_cmd->result, 4208 vport->fc_myDID, 4209 (ndlp) ? ndlp->nlp_DID : 0, 4210 lpfc_cmd->cur_iocbq.sli4_xritag, 4211 wcqe->parameter, 4212 wcqe->total_data_placed, 4213 lpfc_cmd->cur_iocbq.iocb.ulpIoTag); 4214 break; 4215 case IOSTAT_REMOTE_STOP: 4216 if (ndlp) { 4217 /* This I/O was aborted by the target, we don't 4218 * know the rxid and because we did not send the 4219 * ABTS we cannot generate and RRQ. 4220 */ 4221 lpfc_set_rrq_active(phba, ndlp, 4222 lpfc_cmd->cur_iocbq.sli4_lxritag, 4223 0, 0); 4224 } 4225 fallthrough; 4226 case IOSTAT_LOCAL_REJECT: 4227 if (lpfc_cmd->result & IOERR_DRVR_MASK) 4228 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 4229 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR || 4230 lpfc_cmd->result == 4231 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR || 4232 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR || 4233 lpfc_cmd->result == 4234 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) { 4235 cmd->result = DID_NO_CONNECT << 16; 4236 break; 4237 } 4238 if (lpfc_cmd->result == IOERR_INVALID_RPI || 4239 lpfc_cmd->result == IOERR_NO_RESOURCES || 4240 lpfc_cmd->result == IOERR_ABORT_REQUESTED || 4241 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) { 4242 cmd->result = DID_REQUEUE << 16; 4243 break; 4244 } 4245 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED || 4246 lpfc_cmd->result == IOERR_TX_DMA_FAILED) && 4247 status == CQE_STATUS_DI_ERROR) { 4248 if (scsi_get_prot_op(cmd) != 4249 SCSI_PROT_NORMAL) { 4250 /* 4251 * This is a response for a BG enabled 4252 * cmd. 
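				 * (CQE_STATUS_DI_ERROR on a protected command
				 *  means the adapter flagged a T10 DIF guard,
				 *  reference or application tag check.)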
Parse BG error 4253 */ 4254 lpfc_sli4_parse_bg_err(phba, lpfc_cmd, 4255 wcqe); 4256 break; 4257 } 4258 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 4259 "9040 non-zero BGSTAT on unprotected cmd\n"); 4260 } 4261 lpfc_printf_vlog(vport, KERN_WARNING, logit, 4262 "9036 Local Reject FCP cmd x%x failed" 4263 " <%d/%lld> " 4264 "status: x%x result: x%x " 4265 "sid: x%x did: x%x oxid: x%x " 4266 "Data: x%x x%x x%x\n", 4267 cmd->cmnd[0], 4268 cmd->device ? cmd->device->id : 0xffff, 4269 cmd->device ? cmd->device->lun : 0xffff, 4270 lpfc_cmd->status, lpfc_cmd->result, 4271 vport->fc_myDID, 4272 (ndlp) ? ndlp->nlp_DID : 0, 4273 lpfc_cmd->cur_iocbq.sli4_xritag, 4274 wcqe->parameter, 4275 wcqe->total_data_placed, 4276 lpfc_cmd->cur_iocbq.iocb.ulpIoTag); 4277 fallthrough; 4278 default: 4279 if (lpfc_cmd->status >= IOSTAT_CNT) 4280 lpfc_cmd->status = IOSTAT_DEFAULT; 4281 cmd->result = DID_ERROR << 16; 4282 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, 4283 "9037 FCP Completion Error: xri %x " 4284 "status x%x result x%x [x%x] " 4285 "placed x%x\n", 4286 lpfc_cmd->cur_iocbq.sli4_xritag, 4287 lpfc_cmd->status, lpfc_cmd->result, 4288 wcqe->parameter, 4289 wcqe->total_data_placed); 4290 } 4291 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) { 4292 u32 *lp = (u32 *)cmd->sense_buffer; 4293 4294 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 4295 "9039 Iodone <%d/%llu> cmd x%px, error " 4296 "x%x SNS x%x x%x Data: x%x x%x\n", 4297 cmd->device->id, cmd->device->lun, cmd, 4298 cmd->result, *lp, *(lp + 3), cmd->retries, 4299 scsi_get_resid(cmd)); 4300 } 4301 4302 lpfc_update_stats(vport, lpfc_cmd); 4303 4304 if (vport->cfg_max_scsicmpl_time && 4305 time_after(jiffies, lpfc_cmd->start_time + 4306 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { 4307 spin_lock_irqsave(shost->host_lock, flags); 4308 if (ndlp) { 4309 if (ndlp->cmd_qdepth > 4310 atomic_read(&ndlp->cmd_pending) && 4311 (atomic_read(&ndlp->cmd_pending) > 4312 LPFC_MIN_TGT_QDEPTH) && 4313 (cmd->cmnd[0] == READ_10 || 4314 cmd->cmnd[0] == WRITE_10)) 4315 ndlp->cmd_qdepth = 4316 atomic_read(&ndlp->cmd_pending); 4317 4318 ndlp->last_change_time = jiffies; 4319 } 4320 spin_unlock_irqrestore(shost->host_lock, flags); 4321 } 4322 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 4323 4324 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4325 if (lpfc_cmd->ts_cmd_start) { 4326 lpfc_cmd->ts_isr_cmpl = lpfc_cmd->cur_iocbq.isr_timestamp; 4327 lpfc_cmd->ts_data_io = ktime_get_ns(); 4328 phba->ktime_last_cmd = lpfc_cmd->ts_data_io; 4329 lpfc_io_ktime(phba, lpfc_cmd); 4330 } 4331 #endif 4332 lpfc_cmd->pCmd = NULL; 4333 spin_unlock(&lpfc_cmd->buf_lock); 4334 4335 /* The sdev is not guaranteed to be valid post scsi_done upcall. */ 4336 cmd->scsi_done(cmd); 4337 4338 /* 4339 * If there is an abort thread waiting for command completion 4340 * wake up the thread. 4341 */ 4342 spin_lock(&lpfc_cmd->buf_lock); 4343 lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED; 4344 if (lpfc_cmd->waitq) 4345 wake_up(lpfc_cmd->waitq); 4346 spin_unlock(&lpfc_cmd->buf_lock); 4347 4348 lpfc_release_scsi_buf(phba, lpfc_cmd); 4349 } 4350 4351 /** 4352 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine 4353 * @phba: The Hba for which this call is being executed. 4354 * @pIocbIn: The command IOCBQ for the scsi cmnd. 4355 * @pIocbOut: The response IOCBQ for the scsi cmnd. 4356 * 4357 * This routine assigns scsi command result by looking into response IOCB 4358 * status field appropriately. This routine handles QUEUE FULL condition as 4359 * well by ramping down device queue depth. 
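 *
 * Note: this is the IOCB-style completion path; SLI-4 fast-path FCP
 * completions are normally delivered through lpfc_fcp_io_cmd_wqe_cmpl()
 * above.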
4360 **/ 4361 static void 4362 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, 4363 struct lpfc_iocbq *pIocbOut) 4364 { 4365 struct lpfc_io_buf *lpfc_cmd = 4366 (struct lpfc_io_buf *) pIocbIn->context1; 4367 struct lpfc_vport *vport = pIocbIn->vport; 4368 struct lpfc_rport_data *rdata = lpfc_cmd->rdata; 4369 struct lpfc_nodelist *pnode = rdata->pnode; 4370 struct scsi_cmnd *cmd; 4371 unsigned long flags; 4372 struct lpfc_fast_path_event *fast_path_evt; 4373 struct Scsi_Host *shost; 4374 int idx; 4375 uint32_t logit = LOG_FCP; 4376 4377 /* Guard against abort handler being called at same time */ 4378 spin_lock(&lpfc_cmd->buf_lock); 4379 4380 /* Sanity check on return of outstanding command */ 4381 cmd = lpfc_cmd->pCmd; 4382 if (!cmd || !phba) { 4383 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4384 "2621 IO completion: Not an active IO\n"); 4385 spin_unlock(&lpfc_cmd->buf_lock); 4386 return; 4387 } 4388 4389 idx = lpfc_cmd->cur_iocbq.hba_wqidx; 4390 if (phba->sli4_hba.hdwq) 4391 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++; 4392 4393 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4394 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) 4395 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io); 4396 #endif 4397 shost = cmd->device->host; 4398 4399 lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK); 4400 lpfc_cmd->status = pIocbOut->iocb.ulpStatus; 4401 /* pick up SLI4 exhange busy status from HBA */ 4402 if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY) 4403 lpfc_cmd->flags |= LPFC_SBUF_XBUSY; 4404 else 4405 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; 4406 4407 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4408 if (lpfc_cmd->prot_data_type) { 4409 struct scsi_dif_tuple *src = NULL; 4410 4411 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment; 4412 /* 4413 * Used to restore any changes to protection 4414 * data for error injection. 4415 */ 4416 switch (lpfc_cmd->prot_data_type) { 4417 case LPFC_INJERR_REFTAG: 4418 src->ref_tag = 4419 lpfc_cmd->prot_data; 4420 break; 4421 case LPFC_INJERR_APPTAG: 4422 src->app_tag = 4423 (uint16_t)lpfc_cmd->prot_data; 4424 break; 4425 case LPFC_INJERR_GUARD: 4426 src->guard_tag = 4427 (uint16_t)lpfc_cmd->prot_data; 4428 break; 4429 default: 4430 break; 4431 } 4432 4433 lpfc_cmd->prot_data = 0; 4434 lpfc_cmd->prot_data_type = 0; 4435 lpfc_cmd->prot_data_segment = NULL; 4436 } 4437 #endif 4438 4439 if (unlikely(lpfc_cmd->status)) { 4440 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && 4441 (lpfc_cmd->result & IOERR_DRVR_MASK)) 4442 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 4443 else if (lpfc_cmd->status >= IOSTAT_CNT) 4444 lpfc_cmd->status = IOSTAT_DEFAULT; 4445 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR && 4446 !lpfc_cmd->fcp_rsp->rspStatus3 && 4447 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) && 4448 !(vport->cfg_log_verbose & LOG_FCP_UNDER)) 4449 logit = 0; 4450 else 4451 logit = LOG_FCP | LOG_FCP_UNDER; 4452 lpfc_printf_vlog(vport, KERN_WARNING, logit, 4453 "9030 FCP cmd x%x failed <%d/%lld> " 4454 "status: x%x result: x%x " 4455 "sid: x%x did: x%x oxid: x%x " 4456 "Data: x%x x%x\n", 4457 cmd->cmnd[0], 4458 cmd->device ? cmd->device->id : 0xffff, 4459 cmd->device ? cmd->device->lun : 0xffff, 4460 lpfc_cmd->status, lpfc_cmd->result, 4461 vport->fc_myDID, 4462 (pnode) ? pnode->nlp_DID : 0, 4463 phba->sli_rev == LPFC_SLI_REV4 ? 
4464 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff, 4465 pIocbOut->iocb.ulpContext, 4466 lpfc_cmd->cur_iocbq.iocb.ulpIoTag); 4467 4468 switch (lpfc_cmd->status) { 4469 case IOSTAT_FCP_RSP_ERROR: 4470 /* Call FCP RSP handler to determine result */ 4471 lpfc_handle_fcp_err(vport, lpfc_cmd, 4472 pIocbOut->iocb.un.fcpi.fcpi_parm); 4473 break; 4474 case IOSTAT_NPORT_BSY: 4475 case IOSTAT_FABRIC_BSY: 4476 cmd->result = DID_TRANSPORT_DISRUPTED << 16; 4477 fast_path_evt = lpfc_alloc_fast_evt(phba); 4478 if (!fast_path_evt) 4479 break; 4480 fast_path_evt->un.fabric_evt.event_type = 4481 FC_REG_FABRIC_EVENT; 4482 fast_path_evt->un.fabric_evt.subcategory = 4483 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ? 4484 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY; 4485 if (pnode) { 4486 memcpy(&fast_path_evt->un.fabric_evt.wwpn, 4487 &pnode->nlp_portname, 4488 sizeof(struct lpfc_name)); 4489 memcpy(&fast_path_evt->un.fabric_evt.wwnn, 4490 &pnode->nlp_nodename, 4491 sizeof(struct lpfc_name)); 4492 } 4493 fast_path_evt->vport = vport; 4494 fast_path_evt->work_evt.evt = 4495 LPFC_EVT_FASTPATH_MGMT_EVT; 4496 spin_lock_irqsave(&phba->hbalock, flags); 4497 list_add_tail(&fast_path_evt->work_evt.evt_listp, 4498 &phba->work_list); 4499 spin_unlock_irqrestore(&phba->hbalock, flags); 4500 lpfc_worker_wake_up(phba); 4501 break; 4502 case IOSTAT_LOCAL_REJECT: 4503 case IOSTAT_REMOTE_STOP: 4504 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR || 4505 lpfc_cmd->result == 4506 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR || 4507 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR || 4508 lpfc_cmd->result == 4509 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) { 4510 cmd->result = DID_NO_CONNECT << 16; 4511 break; 4512 } 4513 if (lpfc_cmd->result == IOERR_INVALID_RPI || 4514 lpfc_cmd->result == IOERR_NO_RESOURCES || 4515 lpfc_cmd->result == IOERR_ABORT_REQUESTED || 4516 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) { 4517 cmd->result = DID_REQUEUE << 16; 4518 break; 4519 } 4520 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED || 4521 lpfc_cmd->result == IOERR_TX_DMA_FAILED) && 4522 pIocbOut->iocb.unsli3.sli3_bg.bgstat) { 4523 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { 4524 /* 4525 * This is a response for a BG enabled 4526 * cmd. Parse BG error 4527 */ 4528 lpfc_parse_bg_err(phba, lpfc_cmd, 4529 pIocbOut); 4530 break; 4531 } else { 4532 lpfc_printf_vlog(vport, KERN_WARNING, 4533 LOG_BG, 4534 "9031 non-zero BGSTAT " 4535 "on unprotected cmd\n"); 4536 } 4537 } 4538 if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP) 4539 && (phba->sli_rev == LPFC_SLI_REV4) 4540 && pnode) { 4541 /* This IO was aborted by the target, we don't 4542 * know the rxid and because we did not send the 4543 * ABTS we cannot generate and RRQ. 
4544 */ 4545 lpfc_set_rrq_active(phba, pnode, 4546 lpfc_cmd->cur_iocbq.sli4_lxritag, 4547 0, 0); 4548 } 4549 fallthrough; 4550 default: 4551 cmd->result = DID_ERROR << 16; 4552 break; 4553 } 4554 4555 if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) 4556 cmd->result = DID_TRANSPORT_DISRUPTED << 16 | 4557 SAM_STAT_BUSY; 4558 } else 4559 cmd->result = DID_OK << 16; 4560 4561 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) { 4562 uint32_t *lp = (uint32_t *)cmd->sense_buffer; 4563 4564 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 4565 "0710 Iodone <%d/%llu> cmd x%px, error " 4566 "x%x SNS x%x x%x Data: x%x x%x\n", 4567 cmd->device->id, cmd->device->lun, cmd, 4568 cmd->result, *lp, *(lp + 3), cmd->retries, 4569 scsi_get_resid(cmd)); 4570 } 4571 4572 lpfc_update_stats(vport, lpfc_cmd); 4573 if (vport->cfg_max_scsicmpl_time && 4574 time_after(jiffies, lpfc_cmd->start_time + 4575 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { 4576 spin_lock_irqsave(shost->host_lock, flags); 4577 if (pnode) { 4578 if (pnode->cmd_qdepth > 4579 atomic_read(&pnode->cmd_pending) && 4580 (atomic_read(&pnode->cmd_pending) > 4581 LPFC_MIN_TGT_QDEPTH) && 4582 ((cmd->cmnd[0] == READ_10) || 4583 (cmd->cmnd[0] == WRITE_10))) 4584 pnode->cmd_qdepth = 4585 atomic_read(&pnode->cmd_pending); 4586 4587 pnode->last_change_time = jiffies; 4588 } 4589 spin_unlock_irqrestore(shost->host_lock, flags); 4590 } 4591 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 4592 4593 lpfc_cmd->pCmd = NULL; 4594 spin_unlock(&lpfc_cmd->buf_lock); 4595 4596 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 4597 if (lpfc_cmd->ts_cmd_start) { 4598 lpfc_cmd->ts_isr_cmpl = pIocbIn->isr_timestamp; 4599 lpfc_cmd->ts_data_io = ktime_get_ns(); 4600 phba->ktime_last_cmd = lpfc_cmd->ts_data_io; 4601 lpfc_io_ktime(phba, lpfc_cmd); 4602 } 4603 #endif 4604 /* The sdev is not guaranteed to be valid post scsi_done upcall. */ 4605 cmd->scsi_done(cmd); 4606 4607 /* 4608 * If there is an abort thread waiting for command completion 4609 * wake up the thread. 4610 */ 4611 spin_lock(&lpfc_cmd->buf_lock); 4612 lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED; 4613 if (lpfc_cmd->waitq) 4614 wake_up(lpfc_cmd->waitq); 4615 spin_unlock(&lpfc_cmd->buf_lock); 4616 4617 lpfc_release_scsi_buf(phba, lpfc_cmd); 4618 } 4619 4620 /** 4621 * lpfc_scsi_prep_cmnd_buf_s3 - SLI-3 IOCB init for the IO 4622 * @vport: Pointer to vport object. 4623 * @lpfc_cmd: The scsi buffer which is going to be prep'ed. 4624 * @tmo: timeout value for the IO 4625 * 4626 * Based on the data-direction of the command, initialize IOCB 4627 * in the I/O buffer. Fill in the IOCB fields which are independent 4628 * of the scsi buffer 4629 * 4630 * RETURNS 0 - SUCCESS, 4631 **/ 4632 static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport, 4633 struct lpfc_io_buf *lpfc_cmd, 4634 uint8_t tmo) 4635 { 4636 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 4637 struct lpfc_iocbq *piocbq = &lpfc_cmd->cur_iocbq; 4638 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 4639 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 4640 struct lpfc_nodelist *pnode = lpfc_cmd->ndlp; 4641 int datadir = scsi_cmnd->sc_data_direction; 4642 u32 fcpdl; 4643 4644 piocbq->iocb.un.fcpi.fcpi_XRdy = 0; 4645 4646 /* 4647 * There are three possibilities here - use scatter-gather segment, use 4648 * the single mapping, or neither. Start the lpfc command prep by 4649 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first 4650 * data bde entry. 
4651 */ 4652 if (scsi_sg_count(scsi_cmnd)) { 4653 if (datadir == DMA_TO_DEVICE) { 4654 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; 4655 iocb_cmd->ulpPU = PARM_READ_CHECK; 4656 if (vport->cfg_first_burst_size && 4657 (pnode->nlp_flag & NLP_FIRSTBURST)) { 4658 u32 xrdy_len; 4659 4660 fcpdl = scsi_bufflen(scsi_cmnd); 4661 xrdy_len = min(fcpdl, 4662 vport->cfg_first_burst_size); 4663 piocbq->iocb.un.fcpi.fcpi_XRdy = xrdy_len; 4664 } 4665 fcp_cmnd->fcpCntl3 = WRITE_DATA; 4666 } else { 4667 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; 4668 iocb_cmd->ulpPU = PARM_READ_CHECK; 4669 fcp_cmnd->fcpCntl3 = READ_DATA; 4670 } 4671 } else { 4672 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR; 4673 iocb_cmd->un.fcpi.fcpi_parm = 0; 4674 iocb_cmd->ulpPU = 0; 4675 fcp_cmnd->fcpCntl3 = 0; 4676 } 4677 4678 /* 4679 * Finish initializing those IOCB fields that are independent 4680 * of the scsi_cmnd request_buffer 4681 */ 4682 piocbq->iocb.ulpContext = pnode->nlp_rpi; 4683 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) 4684 piocbq->iocb.ulpFCP2Rcvy = 1; 4685 else 4686 piocbq->iocb.ulpFCP2Rcvy = 0; 4687 4688 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f); 4689 piocbq->context1 = lpfc_cmd; 4690 if (!piocbq->iocb_cmpl) 4691 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl; 4692 piocbq->iocb.ulpTimeout = tmo; 4693 piocbq->vport = vport; 4694 return 0; 4695 } 4696 4697 /** 4698 * lpfc_scsi_prep_cmnd_buf_s4 - SLI-4 WQE init for the IO 4699 * @vport: Pointer to vport object. 4700 * @lpfc_cmd: The scsi buffer which is going to be prep'ed. 4701 * @tmo: timeout value for the IO 4702 * 4703 * Based on the data-direction of the command copy WQE template 4704 * to I/O buffer WQE. Fill in the WQE fields which are independent 4705 * of the scsi buffer 4706 * 4707 * RETURNS 0 - SUCCESS, 4708 **/ 4709 static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport, 4710 struct lpfc_io_buf *lpfc_cmd, 4711 uint8_t tmo) 4712 { 4713 struct lpfc_hba *phba = vport->phba; 4714 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 4715 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 4716 struct lpfc_sli4_hdw_queue *hdwq = NULL; 4717 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq; 4718 struct lpfc_nodelist *pnode = lpfc_cmd->ndlp; 4719 union lpfc_wqe128 *wqe = &pwqeq->wqe; 4720 u16 idx = lpfc_cmd->hdwq_no; 4721 int datadir = scsi_cmnd->sc_data_direction; 4722 4723 hdwq = &phba->sli4_hba.hdwq[idx]; 4724 4725 /* Initialize 64 bytes only */ 4726 memset(wqe, 0, sizeof(union lpfc_wqe128)); 4727 4728 /* 4729 * There are three possibilities here - use scatter-gather segment, use 4730 * the single mapping, or neither. 
4731 */ 4732 if (scsi_sg_count(scsi_cmnd)) { 4733 if (datadir == DMA_TO_DEVICE) { 4734 /* From the iwrite template, initialize words 7 - 11 */ 4735 memcpy(&wqe->words[7], 4736 &lpfc_iwrite_cmd_template.words[7], 4737 sizeof(uint32_t) * 5); 4738 4739 fcp_cmnd->fcpCntl3 = WRITE_DATA; 4740 if (hdwq) 4741 hdwq->scsi_cstat.output_requests++; 4742 } else { 4743 /* From the iread template, initialize words 7 - 11 */ 4744 memcpy(&wqe->words[7], 4745 &lpfc_iread_cmd_template.words[7], 4746 sizeof(uint32_t) * 5); 4747 4748 /* Word 7 */ 4749 bf_set(wqe_tmo, &wqe->fcp_iread.wqe_com, tmo); 4750 4751 fcp_cmnd->fcpCntl3 = READ_DATA; 4752 if (hdwq) 4753 hdwq->scsi_cstat.input_requests++; 4754 } 4755 } else { 4756 /* From the icmnd template, initialize words 4 - 11 */ 4757 memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4], 4758 sizeof(uint32_t) * 8); 4759 4760 /* Word 7 */ 4761 bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, tmo); 4762 4763 fcp_cmnd->fcpCntl3 = 0; 4764 if (hdwq) 4765 hdwq->scsi_cstat.control_requests++; 4766 } 4767 4768 /* 4769 * Finish initializing those WQE fields that are independent 4770 * of the request_buffer 4771 */ 4772 4773 /* Word 3 */ 4774 bf_set(payload_offset_len, &wqe->fcp_icmd, 4775 sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); 4776 4777 /* Word 6 */ 4778 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, 4779 phba->sli4_hba.rpi_ids[pnode->nlp_rpi]); 4780 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag); 4781 4782 /* Word 7*/ 4783 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) 4784 bf_set(wqe_erp, &wqe->generic.wqe_com, 1); 4785 4786 bf_set(wqe_class, &wqe->generic.wqe_com, 4787 (pnode->nlp_fcp_info & 0x0f)); 4788 4789 /* Word 8 */ 4790 wqe->generic.wqe_com.abort_tag = pwqeq->iotag; 4791 4792 /* Word 9 */ 4793 bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag); 4794 4795 pwqeq->vport = vport; 4796 pwqeq->vport = vport; 4797 pwqeq->context1 = lpfc_cmd; 4798 pwqeq->hba_wqidx = lpfc_cmd->hdwq_no; 4799 pwqeq->wqe_cmpl = lpfc_fcp_io_cmd_wqe_cmpl; 4800 4801 return 0; 4802 } 4803 4804 /** 4805 * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit 4806 * @vport: The virtual port for which this call is being executed. 4807 * @lpfc_cmd: The scsi command which needs to send. 4808 * @pnode: Pointer to lpfc_nodelist. 4809 * 4810 * This routine initializes fcp_cmnd and iocb data structure from scsi command 4811 * to transfer for device with SLI3 interface spec. 4812 **/ 4813 static int 4814 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, 4815 struct lpfc_nodelist *pnode) 4816 { 4817 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 4818 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 4819 u8 *ptr; 4820 4821 if (!pnode) 4822 return 0; 4823 4824 lpfc_cmd->fcp_rsp->rspSnsLen = 0; 4825 /* clear task management bits */ 4826 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0; 4827 4828 int_to_scsilun(lpfc_cmd->pCmd->device->lun, 4829 &lpfc_cmd->fcp_cmnd->fcp_lun); 4830 4831 ptr = &fcp_cmnd->fcpCdb[0]; 4832 memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len); 4833 if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) { 4834 ptr += scsi_cmnd->cmd_len; 4835 memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len)); 4836 } 4837 4838 fcp_cmnd->fcpCntl1 = SIMPLE_Q; 4839 4840 lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, lpfc_cmd->timeout); 4841 4842 return 0; 4843 } 4844 4845 /** 4846 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit 4847 * @vport: The virtual port for which this call is being executed. 
4848 * @lpfc_cmd: Pointer to lpfc_io_buf data structure. 4849 * @lun: Logical unit number. 4850 * @task_mgmt_cmd: SCSI task management command. 4851 * 4852 * This routine creates FCP information unit corresponding to @task_mgmt_cmd 4853 * for device with SLI-3 interface spec. 4854 * 4855 * Return codes: 4856 * 0 - Error 4857 * 1 - Success 4858 **/ 4859 static int 4860 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, 4861 struct lpfc_io_buf *lpfc_cmd, 4862 uint64_t lun, 4863 uint8_t task_mgmt_cmd) 4864 { 4865 struct lpfc_iocbq *piocbq; 4866 IOCB_t *piocb; 4867 struct fcp_cmnd *fcp_cmnd; 4868 struct lpfc_rport_data *rdata = lpfc_cmd->rdata; 4869 struct lpfc_nodelist *ndlp = rdata->pnode; 4870 4871 if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE) 4872 return 0; 4873 4874 piocbq = &(lpfc_cmd->cur_iocbq); 4875 piocbq->vport = vport; 4876 4877 piocb = &piocbq->iocb; 4878 4879 fcp_cmnd = lpfc_cmd->fcp_cmnd; 4880 /* Clear out any old data in the FCP command area */ 4881 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); 4882 int_to_scsilun(lun, &fcp_cmnd->fcp_lun); 4883 fcp_cmnd->fcpCntl2 = task_mgmt_cmd; 4884 if (vport->phba->sli_rev == 3 && 4885 !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED)) 4886 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd); 4887 piocb->ulpCommand = CMD_FCP_ICMND64_CR; 4888 piocb->ulpContext = ndlp->nlp_rpi; 4889 if (vport->phba->sli_rev == LPFC_SLI_REV4) { 4890 piocb->ulpContext = 4891 vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 4892 } 4893 piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0; 4894 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f); 4895 piocb->ulpPU = 0; 4896 piocb->un.fcpi.fcpi_parm = 0; 4897 4898 /* ulpTimeout is only one byte */ 4899 if (lpfc_cmd->timeout > 0xff) { 4900 /* 4901 * Do not timeout the command at the firmware level. 4902 * The driver will provide the timeout mechanism. 4903 */ 4904 piocb->ulpTimeout = 0; 4905 } else 4906 piocb->ulpTimeout = lpfc_cmd->timeout; 4907 4908 if (vport->phba->sli_rev == LPFC_SLI_REV4) 4909 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd); 4910 4911 return 1; 4912 } 4913 4914 /** 4915 * lpfc_scsi_api_table_setup - Set up scsi api function jump table 4916 * @phba: The hba struct for which this call is being executed. 4917 * @dev_grp: The HBA PCI-Device group number. 4918 * 4919 * This routine sets up the SCSI interface API function jump table in @phba 4920 * struct. 4921 * Returns: 0 - success, -ENODEV - failure. 
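 *
 * The jump table lets the shared SCSI paths call, for example,
 * phba->lpfc_scsi_prep_dma_buf() without checking for SLI-3 vs SLI-4 at
 * each call site; only this routine needs to know the PCI device group.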
4922 **/ 4923 int 4924 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 4925 { 4926 4927 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf; 4928 4929 switch (dev_grp) { 4930 case LPFC_PCI_DEV_LP: 4931 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3; 4932 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3; 4933 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3; 4934 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3; 4935 phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s3; 4936 break; 4937 case LPFC_PCI_DEV_OC: 4938 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4; 4939 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4; 4940 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4; 4941 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4; 4942 phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4; 4943 break; 4944 default: 4945 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4946 "1418 Invalid HBA PCI-device group: 0x%x\n", 4947 dev_grp); 4948 return -ENODEV; 4949 } 4950 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth; 4951 phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl; 4952 return 0; 4953 } 4954 4955 /** 4956 * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command 4957 * @phba: The Hba for which this call is being executed. 4958 * @cmdiocbq: Pointer to lpfc_iocbq data structure. 4959 * @rspiocbq: Pointer to lpfc_iocbq data structure. 4960 * 4961 * This routine is IOCB completion routine for device reset and target reset 4962 * routine. This routine release scsi buffer associated with lpfc_cmd. 4963 **/ 4964 static void 4965 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba, 4966 struct lpfc_iocbq *cmdiocbq, 4967 struct lpfc_iocbq *rspiocbq) 4968 { 4969 struct lpfc_io_buf *lpfc_cmd = 4970 (struct lpfc_io_buf *) cmdiocbq->context1; 4971 if (lpfc_cmd) 4972 lpfc_release_scsi_buf(phba, lpfc_cmd); 4973 return; 4974 } 4975 4976 /** 4977 * lpfc_check_pci_resettable - Walks list of devices on pci_dev's bus to check 4978 * if issuing a pci_bus_reset is possibly unsafe 4979 * @phba: lpfc_hba pointer. 4980 * 4981 * Description: 4982 * Walks the bus_list to ensure only PCI devices with Emulex 4983 * vendor id, device ids that support hot reset, and only one occurrence 4984 * of function 0. 
4985 * 4986 * Returns: 4987 * -EBADSLT, detected invalid device 4988 * 0, successful 4989 */ 4990 int 4991 lpfc_check_pci_resettable(struct lpfc_hba *phba) 4992 { 4993 const struct pci_dev *pdev = phba->pcidev; 4994 struct pci_dev *ptr = NULL; 4995 u8 counter = 0; 4996 4997 /* Walk the list of devices on the pci_dev's bus */ 4998 list_for_each_entry(ptr, &pdev->bus->devices, bus_list) { 4999 /* Check for Emulex Vendor ID */ 5000 if (ptr->vendor != PCI_VENDOR_ID_EMULEX) { 5001 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5002 "8346 Non-Emulex vendor found: " 5003 "0x%04x\n", ptr->vendor); 5004 return -EBADSLT; 5005 } 5006 5007 /* Check for valid Emulex Device ID */ 5008 switch (ptr->device) { 5009 case PCI_DEVICE_ID_LANCER_FC: 5010 case PCI_DEVICE_ID_LANCER_G6_FC: 5011 case PCI_DEVICE_ID_LANCER_G7_FC: 5012 break; 5013 default: 5014 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5015 "8347 Incapable PCI reset device: " 5016 "0x%04x\n", ptr->device); 5017 return -EBADSLT; 5018 } 5019 5020 /* Check for only one function 0 ID to ensure only one HBA on 5021 * secondary bus 5022 */ 5023 if (ptr->devfn == 0) { 5024 if (++counter > 1) { 5025 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5026 "8348 More than one device on " 5027 "secondary bus found\n"); 5028 return -EBADSLT; 5029 } 5030 } 5031 } 5032 5033 return 0; 5034 } 5035 5036 /** 5037 * lpfc_info - Info entry point of scsi_host_template data structure 5038 * @host: The scsi host for which this call is being executed. 5039 * 5040 * This routine provides module information about hba. 5041 * 5042 * Reutrn code: 5043 * Pointer to char - Success. 5044 **/ 5045 const char * 5046 lpfc_info(struct Scsi_Host *host) 5047 { 5048 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata; 5049 struct lpfc_hba *phba = vport->phba; 5050 int link_speed = 0; 5051 static char lpfcinfobuf[384]; 5052 char tmp[384] = {0}; 5053 5054 memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf)); 5055 if (phba && phba->pcidev){ 5056 /* Model Description */ 5057 scnprintf(tmp, sizeof(tmp), phba->ModelDesc); 5058 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >= 5059 sizeof(lpfcinfobuf)) 5060 goto buffer_done; 5061 5062 /* PCI Info */ 5063 scnprintf(tmp, sizeof(tmp), 5064 " on PCI bus %02x device %02x irq %d", 5065 phba->pcidev->bus->number, phba->pcidev->devfn, 5066 phba->pcidev->irq); 5067 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >= 5068 sizeof(lpfcinfobuf)) 5069 goto buffer_done; 5070 5071 /* Port Number */ 5072 if (phba->Port[0]) { 5073 scnprintf(tmp, sizeof(tmp), " port %s", phba->Port); 5074 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >= 5075 sizeof(lpfcinfobuf)) 5076 goto buffer_done; 5077 } 5078 5079 /* Link Speed */ 5080 link_speed = lpfc_sli_port_speed_get(phba); 5081 if (link_speed != 0) { 5082 scnprintf(tmp, sizeof(tmp), 5083 " Logical Link Speed: %d Mbps", link_speed); 5084 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >= 5085 sizeof(lpfcinfobuf)) 5086 goto buffer_done; 5087 } 5088 5089 /* PCI resettable */ 5090 if (!lpfc_check_pci_resettable(phba)) { 5091 scnprintf(tmp, sizeof(tmp), " PCI resettable"); 5092 strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)); 5093 } 5094 } 5095 5096 buffer_done: 5097 return lpfcinfobuf; 5098 } 5099 5100 /** 5101 * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba 5102 * @phba: The Hba for which this call is being executed. 5103 * 5104 * This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo. 5105 * The default value of cfg_poll_tmo is 10 milliseconds. 
5106 **/ 5107 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba) 5108 { 5109 unsigned long poll_tmo_expires = 5110 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo)); 5111 5112 if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq)) 5113 mod_timer(&phba->fcp_poll_timer, 5114 poll_tmo_expires); 5115 } 5116 5117 /** 5118 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA 5119 * @phba: The Hba for which this call is being executed. 5120 * 5121 * This routine starts the fcp_poll_timer of @phba. 5122 **/ 5123 void lpfc_poll_start_timer(struct lpfc_hba * phba) 5124 { 5125 lpfc_poll_rearm_timer(phba); 5126 } 5127 5128 /** 5129 * lpfc_poll_timeout - Restart polling timer 5130 * @t: Timer construct where lpfc_hba data structure pointer is obtained. 5131 * 5132 * This routine restarts fcp_poll timer, when FCP ring polling is enable 5133 * and FCP Ring interrupt is disable. 5134 **/ 5135 void lpfc_poll_timeout(struct timer_list *t) 5136 { 5137 struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer); 5138 5139 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 5140 lpfc_sli_handle_fast_ring_event(phba, 5141 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); 5142 5143 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 5144 lpfc_poll_rearm_timer(phba); 5145 } 5146 } 5147 5148 /** 5149 * lpfc_queuecommand - scsi_host_template queuecommand entry point 5150 * @shost: kernel scsi host pointer. 5151 * @cmnd: Pointer to scsi_cmnd data structure. 5152 * 5153 * Driver registers this routine to scsi midlayer to submit a @cmd to process. 5154 * This routine prepares an IOCB from scsi command and provides to firmware. 5155 * The @done callback is invoked after driver finished processing the command. 5156 * 5157 * Return value : 5158 * 0 - Success 5159 * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily. 5160 **/ 5161 static int 5162 lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) 5163 { 5164 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 5165 struct lpfc_hba *phba = vport->phba; 5166 struct lpfc_rport_data *rdata; 5167 struct lpfc_nodelist *ndlp; 5168 struct lpfc_io_buf *lpfc_cmd; 5169 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 5170 int err, idx; 5171 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 5172 uint64_t start = 0L; 5173 5174 if (phba->ktime_on) 5175 start = ktime_get_ns(); 5176 #endif 5177 5178 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 5179 5180 /* sanity check on references */ 5181 if (unlikely(!rdata) || unlikely(!rport)) 5182 goto out_fail_command; 5183 5184 err = fc_remote_port_chkready(rport); 5185 if (err) { 5186 cmnd->result = err; 5187 goto out_fail_command; 5188 } 5189 ndlp = rdata->pnode; 5190 5191 if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) && 5192 (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) { 5193 5194 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5195 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x" 5196 " op:%02x str=%s without registering for" 5197 " BlockGuard - Rejecting command\n", 5198 cmnd->cmnd[0], scsi_get_prot_op(cmnd), 5199 dif_op_str[scsi_get_prot_op(cmnd)]); 5200 goto out_fail_command; 5201 } 5202 5203 /* 5204 * Catch race where our node has transitioned, but the 5205 * transport is still transitioning. 
5206 */ 5207 if (!ndlp) 5208 goto out_tgt_busy; 5209 if (lpfc_ndlp_check_qdepth(phba, ndlp)) { 5210 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) { 5211 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR, 5212 "3377 Target Queue Full, scsi Id:%d " 5213 "Qdepth:%d Pending command:%d" 5214 " WWNN:%02x:%02x:%02x:%02x:" 5215 "%02x:%02x:%02x:%02x, " 5216 " WWPN:%02x:%02x:%02x:%02x:" 5217 "%02x:%02x:%02x:%02x", 5218 ndlp->nlp_sid, ndlp->cmd_qdepth, 5219 atomic_read(&ndlp->cmd_pending), 5220 ndlp->nlp_nodename.u.wwn[0], 5221 ndlp->nlp_nodename.u.wwn[1], 5222 ndlp->nlp_nodename.u.wwn[2], 5223 ndlp->nlp_nodename.u.wwn[3], 5224 ndlp->nlp_nodename.u.wwn[4], 5225 ndlp->nlp_nodename.u.wwn[5], 5226 ndlp->nlp_nodename.u.wwn[6], 5227 ndlp->nlp_nodename.u.wwn[7], 5228 ndlp->nlp_portname.u.wwn[0], 5229 ndlp->nlp_portname.u.wwn[1], 5230 ndlp->nlp_portname.u.wwn[2], 5231 ndlp->nlp_portname.u.wwn[3], 5232 ndlp->nlp_portname.u.wwn[4], 5233 ndlp->nlp_portname.u.wwn[5], 5234 ndlp->nlp_portname.u.wwn[6], 5235 ndlp->nlp_portname.u.wwn[7]); 5236 goto out_tgt_busy; 5237 } 5238 } 5239 5240 lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd); 5241 if (lpfc_cmd == NULL) { 5242 lpfc_rampdown_queue_depth(phba); 5243 5244 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR, 5245 "0707 driver's buffer pool is empty, " 5246 "IO busied\n"); 5247 goto out_host_busy; 5248 } 5249 5250 /* 5251 * Store the midlayer's command structure for the completion phase 5252 * and complete the command initialization. 5253 */ 5254 lpfc_cmd->pCmd = cmnd; 5255 lpfc_cmd->rdata = rdata; 5256 lpfc_cmd->ndlp = ndlp; 5257 lpfc_cmd->cur_iocbq.iocb_cmpl = NULL; 5258 cmnd->host_scribble = (unsigned char *)lpfc_cmd; 5259 5260 err = lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); 5261 if (err) 5262 goto out_host_busy_release_buf; 5263 5264 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { 5265 if (vport->phba->cfg_enable_bg) { 5266 lpfc_printf_vlog(vport, 5267 KERN_INFO, LOG_SCSI_CMD, 5268 "9033 BLKGRD: rcvd %s cmd:x%x " 5269 "reftag x%x cnt %u pt %x\n", 5270 dif_op_str[scsi_get_prot_op(cmnd)], 5271 cmnd->cmnd[0], 5272 t10_pi_ref_tag(cmnd->request), 5273 blk_rq_sectors(cmnd->request), 5274 (cmnd->cmnd[1]>>5)); 5275 } 5276 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 5277 } else { 5278 if (vport->phba->cfg_enable_bg) { 5279 lpfc_printf_vlog(vport, 5280 KERN_INFO, LOG_SCSI_CMD, 5281 "9038 BLKGRD: rcvd PROT_NORMAL cmd: " 5282 "x%x reftag x%x cnt %u pt %x\n", 5283 cmnd->cmnd[0], 5284 t10_pi_ref_tag(cmnd->request), 5285 blk_rq_sectors(cmnd->request), 5286 (cmnd->cmnd[1]>>5)); 5287 } 5288 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); 5289 } 5290 5291 if (unlikely(err)) { 5292 if (err == 2) { 5293 cmnd->result = DID_ERROR << 16; 5294 goto out_fail_command_release_buf; 5295 } 5296 goto out_host_busy_free_buf; 5297 } 5298 5299 5300 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 5301 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) 5302 this_cpu_inc(phba->sli4_hba.c_stat->xmt_io); 5303 #endif 5304 /* Issue I/O to adapter */ 5305 err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING, 5306 &lpfc_cmd->cur_iocbq, 5307 SLI_IOCB_RET_IOCB); 5308 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 5309 if (start) { 5310 lpfc_cmd->ts_cmd_start = start; 5311 lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd; 5312 lpfc_cmd->ts_cmd_wqput = ktime_get_ns(); 5313 } else { 5314 lpfc_cmd->ts_cmd_start = 0; 5315 } 5316 #endif 5317 if (err) { 5318 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5319 "3376 FCP could not issue IOCB err %x " 5320 "FCP cmd x%x <%d/%llu> " 5321 "sid: x%x did: x%x oxid: x%x " 5322 "Data: x%x x%x 
x%x x%x\n", 5323 err, cmnd->cmnd[0], 5324 cmnd->device ? cmnd->device->id : 0xffff, 5325 cmnd->device ? cmnd->device->lun : (u64)-1, 5326 vport->fc_myDID, ndlp->nlp_DID, 5327 phba->sli_rev == LPFC_SLI_REV4 ? 5328 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff, 5329 phba->sli_rev == LPFC_SLI_REV4 ? 5330 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] : 5331 lpfc_cmd->cur_iocbq.iocb.ulpContext, 5332 lpfc_cmd->cur_iocbq.iotag, 5333 phba->sli_rev == LPFC_SLI_REV4 ? 5334 bf_get(wqe_tmo, 5335 &lpfc_cmd->cur_iocbq.wqe.generic.wqe_com) : 5336 lpfc_cmd->cur_iocbq.iocb.ulpTimeout, 5337 (uint32_t) 5338 (cmnd->request->timeout / 1000)); 5339 5340 goto out_host_busy_free_buf; 5341 } 5342 5343 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 5344 lpfc_sli_handle_fast_ring_event(phba, 5345 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); 5346 5347 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 5348 lpfc_poll_rearm_timer(phba); 5349 } 5350 5351 if (phba->cfg_xri_rebalancing) 5352 lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no); 5353 5354 return 0; 5355 5356 out_host_busy_free_buf: 5357 idx = lpfc_cmd->hdwq_no; 5358 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 5359 if (phba->sli4_hba.hdwq) { 5360 switch (lpfc_cmd->fcp_cmnd->fcpCntl3) { 5361 case WRITE_DATA: 5362 phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--; 5363 break; 5364 case READ_DATA: 5365 phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--; 5366 break; 5367 default: 5368 phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--; 5369 } 5370 } 5371 out_host_busy_release_buf: 5372 lpfc_release_scsi_buf(phba, lpfc_cmd); 5373 out_host_busy: 5374 return SCSI_MLQUEUE_HOST_BUSY; 5375 5376 out_tgt_busy: 5377 return SCSI_MLQUEUE_TARGET_BUSY; 5378 5379 out_fail_command_release_buf: 5380 lpfc_release_scsi_buf(phba, lpfc_cmd); 5381 5382 out_fail_command: 5383 cmnd->scsi_done(cmnd); 5384 return 0; 5385 } 5386 5387 5388 /** 5389 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point 5390 * @cmnd: Pointer to scsi_cmnd data structure. 5391 * 5392 * This routine aborts @cmnd pending in base driver. 
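 * The routine looks up the driver's lpfc_io_buf through cmnd->host_scribble,
 * requests an abort of the outstanding exchange and then waits, bounded by
 * twice the devloss timeout, for the aborted I/O to complete.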
5393 * 5394 * Return code : 5395 * 0x2003 - Error 5396 * 0x2002 - Success 5397 **/ 5398 static int 5399 lpfc_abort_handler(struct scsi_cmnd *cmnd) 5400 { 5401 struct Scsi_Host *shost = cmnd->device->host; 5402 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 5403 struct lpfc_hba *phba = vport->phba; 5404 struct lpfc_iocbq *iocb; 5405 struct lpfc_io_buf *lpfc_cmd; 5406 int ret = SUCCESS, status = 0; 5407 struct lpfc_sli_ring *pring_s4 = NULL; 5408 struct lpfc_sli_ring *pring = NULL; 5409 int ret_val; 5410 unsigned long flags; 5411 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); 5412 5413 status = fc_block_scsi_eh(cmnd); 5414 if (status != 0 && status != SUCCESS) 5415 return status; 5416 5417 lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble; 5418 if (!lpfc_cmd) 5419 return ret; 5420 5421 spin_lock_irqsave(&phba->hbalock, flags); 5422 /* driver queued commands are in process of being flushed */ 5423 if (phba->hba_flag & HBA_IOQ_FLUSH) { 5424 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5425 "3168 SCSI Layer abort requested I/O has been " 5426 "flushed by LLD.\n"); 5427 ret = FAILED; 5428 goto out_unlock; 5429 } 5430 5431 /* Guard against IO completion being called at same time */ 5432 spin_lock(&lpfc_cmd->buf_lock); 5433 5434 if (!lpfc_cmd->pCmd) { 5435 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5436 "2873 SCSI Layer I/O Abort Request IO CMPL Status " 5437 "x%x ID %d LUN %llu\n", 5438 SUCCESS, cmnd->device->id, cmnd->device->lun); 5439 goto out_unlock_buf; 5440 } 5441 5442 iocb = &lpfc_cmd->cur_iocbq; 5443 if (phba->sli_rev == LPFC_SLI_REV4) { 5444 pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring; 5445 if (!pring_s4) { 5446 ret = FAILED; 5447 goto out_unlock_buf; 5448 } 5449 spin_lock(&pring_s4->ring_lock); 5450 } 5451 /* the command is in process of being cancelled */ 5452 if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { 5453 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5454 "3169 SCSI Layer abort requested I/O has been " 5455 "cancelled by LLD.\n"); 5456 ret = FAILED; 5457 goto out_unlock_ring; 5458 } 5459 /* 5460 * If pCmd field of the corresponding lpfc_io_buf structure 5461 * points to a different SCSI command, then the driver has 5462 * already completed this command, but the midlayer did not 5463 * see the completion before the eh fired. Just return SUCCESS. 5464 */ 5465 if (lpfc_cmd->pCmd != cmnd) { 5466 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5467 "3170 SCSI Layer abort requested I/O has been " 5468 "completed by LLD.\n"); 5469 goto out_unlock_ring; 5470 } 5471 5472 BUG_ON(iocb->context1 != lpfc_cmd); 5473 5474 /* abort issued in recovery is still in progress */ 5475 if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) { 5476 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5477 "3389 SCSI Layer I/O Abort Request is pending\n"); 5478 if (phba->sli_rev == LPFC_SLI_REV4) 5479 spin_unlock(&pring_s4->ring_lock); 5480 spin_unlock(&lpfc_cmd->buf_lock); 5481 spin_unlock_irqrestore(&phba->hbalock, flags); 5482 goto wait_for_cmpl; 5483 } 5484 5485 lpfc_cmd->waitq = &waitq; 5486 if (phba->sli_rev == LPFC_SLI_REV4) { 5487 spin_unlock(&pring_s4->ring_lock); 5488 ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb, 5489 lpfc_sli4_abort_fcp_cmpl); 5490 } else { 5491 pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; 5492 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb, 5493 lpfc_sli_abort_fcp_cmpl); 5494 } 5495 5496 /* Make sure HBA is alive */ 5497 lpfc_issue_hb_tmo(phba); 5498 5499 if (ret_val != IOCB_SUCCESS) { 5500 /* Indicate the IO is not being aborted by the driver. 
*/ 5501 lpfc_cmd->waitq = NULL; 5502 spin_unlock(&lpfc_cmd->buf_lock); 5503 spin_unlock_irqrestore(&phba->hbalock, flags); 5504 ret = FAILED; 5505 goto out; 5506 } 5507 5508 /* no longer need the lock after this point */ 5509 spin_unlock(&lpfc_cmd->buf_lock); 5510 spin_unlock_irqrestore(&phba->hbalock, flags); 5511 5512 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 5513 lpfc_sli_handle_fast_ring_event(phba, 5514 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); 5515 5516 wait_for_cmpl: 5517 /* 5518 * iocb_flag is set to LPFC_DRIVER_ABORTED before we wait 5519 * for abort to complete. 5520 */ 5521 wait_event_timeout(waitq, 5522 (lpfc_cmd->pCmd != cmnd), 5523 msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000)); 5524 5525 spin_lock(&lpfc_cmd->buf_lock); 5526 5527 if (lpfc_cmd->pCmd == cmnd) { 5528 ret = FAILED; 5529 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5530 "0748 abort handler timed out waiting " 5531 "for aborting I/O (xri:x%x) to complete: " 5532 "ret %#x, ID %d, LUN %llu\n", 5533 iocb->sli4_xritag, ret, 5534 cmnd->device->id, cmnd->device->lun); 5535 } 5536 5537 lpfc_cmd->waitq = NULL; 5538 5539 spin_unlock(&lpfc_cmd->buf_lock); 5540 goto out; 5541 5542 out_unlock_ring: 5543 if (phba->sli_rev == LPFC_SLI_REV4) 5544 spin_unlock(&pring_s4->ring_lock); 5545 out_unlock_buf: 5546 spin_unlock(&lpfc_cmd->buf_lock); 5547 out_unlock: 5548 spin_unlock_irqrestore(&phba->hbalock, flags); 5549 out: 5550 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 5551 "0749 SCSI Layer I/O Abort Request Status x%x ID %d " 5552 "LUN %llu\n", ret, cmnd->device->id, 5553 cmnd->device->lun); 5554 return ret; 5555 } 5556 5557 static char * 5558 lpfc_taskmgmt_name(uint8_t task_mgmt_cmd) 5559 { 5560 switch (task_mgmt_cmd) { 5561 case FCP_ABORT_TASK_SET: 5562 return "ABORT_TASK_SET"; 5563 case FCP_CLEAR_TASK_SET: 5564 return "FCP_CLEAR_TASK_SET"; 5565 case FCP_BUS_RESET: 5566 return "FCP_BUS_RESET"; 5567 case FCP_LUN_RESET: 5568 return "FCP_LUN_RESET"; 5569 case FCP_TARGET_RESET: 5570 return "FCP_TARGET_RESET"; 5571 case FCP_CLEAR_ACA: 5572 return "FCP_CLEAR_ACA"; 5573 case FCP_TERMINATE_TASK: 5574 return "FCP_TERMINATE_TASK"; 5575 default: 5576 return "unknown"; 5577 } 5578 } 5579 5580 5581 /** 5582 * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed 5583 * @vport: The virtual port for which this call is being executed. 5584 * @lpfc_cmd: Pointer to lpfc_io_buf data structure. 5585 * 5586 * This routine checks the FCP RSP INFO to see if the tsk mgmt command succeded 5587 * 5588 * Return code : 5589 * 0x2003 - Error 5590 * 0x2002 - Success 5591 **/ 5592 static int 5593 lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd) 5594 { 5595 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; 5596 uint32_t rsp_info; 5597 uint32_t rsp_len; 5598 uint8_t rsp_info_code; 5599 int ret = FAILED; 5600 5601 5602 if (fcprsp == NULL) 5603 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5604 "0703 fcp_rsp is missing\n"); 5605 else { 5606 rsp_info = fcprsp->rspStatus2; 5607 rsp_len = be32_to_cpu(fcprsp->rspRspLen); 5608 rsp_info_code = fcprsp->rspInfo3; 5609 5610 5611 lpfc_printf_vlog(vport, KERN_INFO, 5612 LOG_FCP, 5613 "0706 fcp_rsp valid 0x%x," 5614 " rsp len=%d code 0x%x\n", 5615 rsp_info, 5616 rsp_len, rsp_info_code); 5617 5618 /* If FCP_RSP_LEN_VALID bit is one, then the FCP_RSP_LEN 5619 * field specifies the number of valid bytes of FCP_RSP_INFO. 
5620 * The FCP_RSP_LEN field shall be set to 0x04 or 0x08 5621 */ 5622 if ((fcprsp->rspStatus2 & RSP_LEN_VALID) && 5623 ((rsp_len == 8) || (rsp_len == 4))) { 5624 switch (rsp_info_code) { 5625 case RSP_NO_FAILURE: 5626 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5627 "0715 Task Mgmt No Failure\n"); 5628 ret = SUCCESS; 5629 break; 5630 case RSP_TM_NOT_SUPPORTED: /* TM rejected */ 5631 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5632 "0716 Task Mgmt Target " 5633 "reject\n"); 5634 break; 5635 case RSP_TM_NOT_COMPLETED: /* TM failed */ 5636 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5637 "0717 Task Mgmt Target " 5638 "failed TM\n"); 5639 break; 5640 case RSP_TM_INVALID_LU: /* TM to invalid LU! */ 5641 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5642 "0718 Task Mgmt to invalid " 5643 "LUN\n"); 5644 break; 5645 } 5646 } 5647 } 5648 return ret; 5649 } 5650 5651 5652 /** 5653 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler 5654 * @vport: The virtual port for which this call is being executed. 5655 * @cmnd: Pointer to scsi_cmnd data structure. 5656 * @tgt_id: Target ID of remote device. 5657 * @lun_id: Lun number for the TMF 5658 * @task_mgmt_cmd: type of TMF to send 5659 * 5660 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to 5661 * a remote port. 5662 * 5663 * Return Code: 5664 * 0x2003 - Error 5665 * 0x2002 - Success. 5666 **/ 5667 static int 5668 lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd, 5669 unsigned int tgt_id, uint64_t lun_id, 5670 uint8_t task_mgmt_cmd) 5671 { 5672 struct lpfc_hba *phba = vport->phba; 5673 struct lpfc_io_buf *lpfc_cmd; 5674 struct lpfc_iocbq *iocbq; 5675 struct lpfc_iocbq *iocbqrsp; 5676 struct lpfc_rport_data *rdata; 5677 struct lpfc_nodelist *pnode; 5678 int ret; 5679 int status; 5680 5681 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 5682 if (!rdata || !rdata->pnode) 5683 return FAILED; 5684 pnode = rdata->pnode; 5685 5686 lpfc_cmd = lpfc_get_scsi_buf(phba, pnode, NULL); 5687 if (lpfc_cmd == NULL) 5688 return FAILED; 5689 lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo; 5690 lpfc_cmd->rdata = rdata; 5691 lpfc_cmd->pCmd = cmnd; 5692 lpfc_cmd->ndlp = pnode; 5693 5694 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id, 5695 task_mgmt_cmd); 5696 if (!status) { 5697 lpfc_release_scsi_buf(phba, lpfc_cmd); 5698 return FAILED; 5699 } 5700 5701 iocbq = &lpfc_cmd->cur_iocbq; 5702 iocbqrsp = lpfc_sli_get_iocbq(phba); 5703 if (iocbqrsp == NULL) { 5704 lpfc_release_scsi_buf(phba, lpfc_cmd); 5705 return FAILED; 5706 } 5707 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; 5708 5709 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5710 "0702 Issue %s to TGT %d LUN %llu " 5711 "rpi x%x nlp_flag x%x Data: x%x x%x\n", 5712 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id, 5713 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag, 5714 iocbq->iocb_flag); 5715 5716 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, 5717 iocbq, iocbqrsp, lpfc_cmd->timeout); 5718 if ((status != IOCB_SUCCESS) || 5719 (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) { 5720 if (status != IOCB_SUCCESS || 5721 iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR) 5722 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5723 "0727 TMF %s to TGT %d LUN %llu " 5724 "failed (%d, %d) iocb_flag x%x\n", 5725 lpfc_taskmgmt_name(task_mgmt_cmd), 5726 tgt_id, lun_id, 5727 iocbqrsp->iocb.ulpStatus, 5728 iocbqrsp->iocb.un.ulpWord[4], 5729 iocbq->iocb_flag); 5730 /* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */ 5731 if (status == IOCB_SUCCESS) { 5732 if 
(iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) 5733 /* Something in the FCP_RSP was invalid. 5734 * Check conditions */ 5735 ret = lpfc_check_fcp_rsp(vport, lpfc_cmd); 5736 else 5737 ret = FAILED; 5738 } else if (status == IOCB_TIMEDOUT) { 5739 ret = TIMEOUT_ERROR; 5740 } else { 5741 ret = FAILED; 5742 } 5743 } else 5744 ret = SUCCESS; 5745 5746 lpfc_sli_release_iocbq(phba, iocbqrsp); 5747 5748 if (ret != TIMEOUT_ERROR) 5749 lpfc_release_scsi_buf(phba, lpfc_cmd); 5750 5751 return ret; 5752 } 5753 5754 /** 5755 * lpfc_chk_tgt_mapped - 5756 * @vport: The virtual port to check on 5757 * @cmnd: Pointer to scsi_cmnd data structure. 5758 * 5759 * This routine delays until the scsi target (aka rport) for the 5760 * command exists (is present and logged in) or we declare it non-existent. 5761 * 5762 * Return code : 5763 * 0x2003 - Error 5764 * 0x2002 - Success 5765 **/ 5766 static int 5767 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd) 5768 { 5769 struct lpfc_rport_data *rdata; 5770 struct lpfc_nodelist *pnode; 5771 unsigned long later; 5772 5773 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 5774 if (!rdata) { 5775 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 5776 "0797 Tgt Map rport failure: rdata x%px\n", rdata); 5777 return FAILED; 5778 } 5779 pnode = rdata->pnode; 5780 /* 5781 * If target is not in a MAPPED state, delay until 5782 * target is rediscovered or devloss timeout expires. 5783 */ 5784 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; 5785 while (time_after(later, jiffies)) { 5786 if (!pnode) 5787 return FAILED; 5788 if (pnode->nlp_state == NLP_STE_MAPPED_NODE) 5789 return SUCCESS; 5790 schedule_timeout_uninterruptible(msecs_to_jiffies(500)); 5791 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 5792 if (!rdata) 5793 return FAILED; 5794 pnode = rdata->pnode; 5795 } 5796 if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) 5797 return FAILED; 5798 return SUCCESS; 5799 } 5800 5801 /** 5802 * lpfc_reset_flush_io_context - 5803 * @vport: The virtual port (scsi_host) for the flush context 5804 * @tgt_id: If aborting by Target contect - specifies the target id 5805 * @lun_id: If aborting by Lun context - specifies the lun id 5806 * @context: specifies the context level to flush at. 5807 * 5808 * After a reset condition via TMF, we need to flush orphaned i/o 5809 * contexts from the adapter. This routine aborts any contexts 5810 * outstanding, then waits for their completions. The wait is 5811 * bounded by devloss_tmo though. 5812 * 5813 * Return code : 5814 * 0x2003 - Error 5815 * 0x2002 - Success 5816 **/ 5817 static int 5818 lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id, 5819 uint64_t lun_id, lpfc_ctx_cmd context) 5820 { 5821 struct lpfc_hba *phba = vport->phba; 5822 unsigned long later; 5823 int cnt; 5824 5825 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); 5826 if (cnt) 5827 lpfc_sli_abort_taskmgmt(vport, 5828 &phba->sli.sli3_ring[LPFC_FCP_RING], 5829 tgt_id, lun_id, context); 5830 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; 5831 while (time_after(later, jiffies) && cnt) { 5832 schedule_timeout_uninterruptible(msecs_to_jiffies(20)); 5833 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); 5834 } 5835 if (cnt) { 5836 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5837 "0724 I/O flush failure for context %s : cnt x%x\n", 5838 ((context == LPFC_CTX_LUN) ? "LUN" : 5839 ((context == LPFC_CTX_TGT) ? "TGT" : 5840 ((context == LPFC_CTX_HOST) ? 
"HOST" : "Unknown"))), 5841 cnt); 5842 return FAILED; 5843 } 5844 return SUCCESS; 5845 } 5846 5847 /** 5848 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point 5849 * @cmnd: Pointer to scsi_cmnd data structure. 5850 * 5851 * This routine does a device reset by sending a LUN_RESET task management 5852 * command. 5853 * 5854 * Return code : 5855 * 0x2003 - Error 5856 * 0x2002 - Success 5857 **/ 5858 static int 5859 lpfc_device_reset_handler(struct scsi_cmnd *cmnd) 5860 { 5861 struct Scsi_Host *shost = cmnd->device->host; 5862 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 5863 struct lpfc_rport_data *rdata; 5864 struct lpfc_nodelist *pnode; 5865 unsigned tgt_id = cmnd->device->id; 5866 uint64_t lun_id = cmnd->device->lun; 5867 struct lpfc_scsi_event_header scsi_event; 5868 int status; 5869 u32 logit = LOG_FCP; 5870 5871 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 5872 if (!rdata || !rdata->pnode) { 5873 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5874 "0798 Device Reset rdata failure: rdata x%px\n", 5875 rdata); 5876 return FAILED; 5877 } 5878 pnode = rdata->pnode; 5879 status = fc_block_scsi_eh(cmnd); 5880 if (status != 0 && status != SUCCESS) 5881 return status; 5882 5883 status = lpfc_chk_tgt_mapped(vport, cmnd); 5884 if (status == FAILED) { 5885 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5886 "0721 Device Reset rport failure: rdata x%px\n", rdata); 5887 return FAILED; 5888 } 5889 5890 scsi_event.event_type = FC_REG_SCSI_EVENT; 5891 scsi_event.subcategory = LPFC_EVENT_LUNRESET; 5892 scsi_event.lun = lun_id; 5893 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); 5894 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); 5895 5896 fc_host_post_vendor_event(shost, fc_get_event_number(), 5897 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 5898 5899 status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id, 5900 FCP_LUN_RESET); 5901 if (status != SUCCESS) 5902 logit = LOG_TRACE_EVENT; 5903 5904 lpfc_printf_vlog(vport, KERN_ERR, logit, 5905 "0713 SCSI layer issued Device Reset (%d, %llu) " 5906 "return x%x\n", tgt_id, lun_id, status); 5907 5908 /* 5909 * We have to clean up i/o as : they may be orphaned by the TMF; 5910 * or if the TMF failed, they may be in an indeterminate state. 5911 * So, continue on. 5912 * We will report success if all the i/o aborts successfully. 5913 */ 5914 if (status == SUCCESS) 5915 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, 5916 LPFC_CTX_LUN); 5917 5918 return status; 5919 } 5920 5921 /** 5922 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point 5923 * @cmnd: Pointer to scsi_cmnd data structure. 5924 * 5925 * This routine does a target reset by sending a TARGET_RESET task management 5926 * command. 
5927 * 5928 * Return code : 5929 * 0x2003 - Error 5930 * 0x2002 - Success 5931 **/ 5932 static int 5933 lpfc_target_reset_handler(struct scsi_cmnd *cmnd) 5934 { 5935 struct Scsi_Host *shost = cmnd->device->host; 5936 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 5937 struct lpfc_rport_data *rdata; 5938 struct lpfc_nodelist *pnode; 5939 unsigned tgt_id = cmnd->device->id; 5940 uint64_t lun_id = cmnd->device->lun; 5941 struct lpfc_scsi_event_header scsi_event; 5942 int status; 5943 u32 logit = LOG_FCP; 5944 unsigned long flags; 5945 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); 5946 5947 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 5948 if (!rdata || !rdata->pnode) { 5949 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5950 "0799 Target Reset rdata failure: rdata x%px\n", 5951 rdata); 5952 return FAILED; 5953 } 5954 pnode = rdata->pnode; 5955 status = fc_block_scsi_eh(cmnd); 5956 if (status != 0 && status != SUCCESS) 5957 return status; 5958 5959 status = lpfc_chk_tgt_mapped(vport, cmnd); 5960 if (status == FAILED) { 5961 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5962 "0722 Target Reset rport failure: rdata x%px\n", rdata); 5963 if (pnode) { 5964 spin_lock_irqsave(&pnode->lock, flags); 5965 pnode->nlp_flag &= ~NLP_NPR_ADISC; 5966 pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 5967 spin_unlock_irqrestore(&pnode->lock, flags); 5968 } 5969 lpfc_reset_flush_io_context(vport, tgt_id, lun_id, 5970 LPFC_CTX_TGT); 5971 return FAST_IO_FAIL; 5972 } 5973 5974 scsi_event.event_type = FC_REG_SCSI_EVENT; 5975 scsi_event.subcategory = LPFC_EVENT_TGTRESET; 5976 scsi_event.lun = 0; 5977 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); 5978 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); 5979 5980 fc_host_post_vendor_event(shost, fc_get_event_number(), 5981 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 5982 5983 status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id, 5984 FCP_TARGET_RESET); 5985 if (status != SUCCESS) 5986 logit = LOG_TRACE_EVENT; 5987 spin_lock_irqsave(&pnode->lock, flags); 5988 if (status != SUCCESS && 5989 (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO)) && 5990 !pnode->logo_waitq) { 5991 pnode->logo_waitq = &waitq; 5992 pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 5993 pnode->nlp_flag |= NLP_ISSUE_LOGO; 5994 pnode->upcall_flags |= NLP_WAIT_FOR_LOGO; 5995 spin_unlock_irqrestore(&pnode->lock, flags); 5996 lpfc_unreg_rpi(vport, pnode); 5997 wait_event_timeout(waitq, 5998 (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO)), 5999 msecs_to_jiffies(vport->cfg_devloss_tmo * 6000 1000)); 6001 6002 if (pnode->upcall_flags & NLP_WAIT_FOR_LOGO) { 6003 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6004 "0725 SCSI layer TGTRST failed & LOGO TMO " 6005 " (%d, %llu) return x%x\n", tgt_id, 6006 lun_id, status); 6007 spin_lock_irqsave(&pnode->lock, flags); 6008 pnode->upcall_flags &= ~NLP_WAIT_FOR_LOGO; 6009 } else { 6010 spin_lock_irqsave(&pnode->lock, flags); 6011 } 6012 pnode->logo_waitq = NULL; 6013 spin_unlock_irqrestore(&pnode->lock, flags); 6014 status = SUCCESS; 6015 } else { 6016 status = FAILED; 6017 spin_unlock_irqrestore(&pnode->lock, flags); 6018 } 6019 6020 lpfc_printf_vlog(vport, KERN_ERR, logit, 6021 "0723 SCSI layer issued Target Reset (%d, %llu) " 6022 "return x%x\n", tgt_id, lun_id, status); 6023 6024 /* 6025 * We have to clean up i/o as : they may be orphaned by the TMF; 6026 * or if the TMF failed, they may be in an indeterminate state. 6027 * So, continue on. 
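	 * The flush below is done by LPFC_CTX_TGT context so every LUN
	 * behind this target is covered.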
6028 * We will report success if all the i/o aborts successfully. 6029 */ 6030 if (status == SUCCESS) 6031 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, 6032 LPFC_CTX_TGT); 6033 return status; 6034 } 6035 6036 /** 6037 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point 6038 * @cmnd: Pointer to scsi_cmnd data structure. 6039 * 6040 * This routine does target reset to all targets on @cmnd->device->host. 6041 * This emulates Parallel SCSI Bus Reset Semantics. 6042 * 6043 * Return code : 6044 * 0x2003 - Error 6045 * 0x2002 - Success 6046 **/ 6047 static int 6048 lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) 6049 { 6050 struct Scsi_Host *shost = cmnd->device->host; 6051 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6052 struct lpfc_nodelist *ndlp = NULL; 6053 struct lpfc_scsi_event_header scsi_event; 6054 int match; 6055 int ret = SUCCESS, status, i; 6056 u32 logit = LOG_FCP; 6057 6058 scsi_event.event_type = FC_REG_SCSI_EVENT; 6059 scsi_event.subcategory = LPFC_EVENT_BUSRESET; 6060 scsi_event.lun = 0; 6061 memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name)); 6062 memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name)); 6063 6064 fc_host_post_vendor_event(shost, fc_get_event_number(), 6065 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 6066 6067 status = fc_block_scsi_eh(cmnd); 6068 if (status != 0 && status != SUCCESS) 6069 return status; 6070 6071 /* 6072 * Since the driver manages a single bus device, reset all 6073 * targets known to the driver. Should any target reset 6074 * fail, this routine returns failure to the midlayer. 6075 */ 6076 for (i = 0; i < LPFC_MAX_TARGET; i++) { 6077 /* Search for mapped node by target ID */ 6078 match = 0; 6079 spin_lock_irq(shost->host_lock); 6080 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 6081 6082 if (vport->phba->cfg_fcp2_no_tgt_reset && 6083 (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE)) 6084 continue; 6085 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && 6086 ndlp->nlp_sid == i && 6087 ndlp->rport && 6088 ndlp->nlp_type & NLP_FCP_TARGET) { 6089 match = 1; 6090 break; 6091 } 6092 } 6093 spin_unlock_irq(shost->host_lock); 6094 if (!match) 6095 continue; 6096 6097 status = lpfc_send_taskmgmt(vport, cmnd, 6098 i, 0, FCP_TARGET_RESET); 6099 6100 if (status != SUCCESS) { 6101 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6102 "0700 Bus Reset on target %d failed\n", 6103 i); 6104 ret = FAILED; 6105 } 6106 } 6107 /* 6108 * We have to clean up i/o as : they may be orphaned by the TMFs 6109 * above; or if any of the TMFs failed, they may be in an 6110 * indeterminate state. 6111 * We will report success if all the i/o aborts successfully. 6112 */ 6113 6114 status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST); 6115 if (status != SUCCESS) 6116 ret = FAILED; 6117 if (ret == FAILED) 6118 logit = LOG_TRACE_EVENT; 6119 6120 lpfc_printf_vlog(vport, KERN_ERR, logit, 6121 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret); 6122 return ret; 6123 } 6124 6125 /** 6126 * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt 6127 * @cmnd: Pointer to scsi_cmnd data structure. 6128 * 6129 * This routine does host reset to the adaptor port. It brings the HBA 6130 * offline, performs a board restart, and then brings the board back online. 6131 * The lpfc_offline calls lpfc_sli_hba_down which will abort and local 6132 * reject all outstanding SCSI commands to the host and error returned 6133 * back to SCSI mid-level. 
As this will be SCSI mid-level's last resort 6134 * of error handling, it will only return error if resetting of the adapter 6135 * is not successful; in all other cases, will return success. 6136 * 6137 * Return code : 6138 * 0x2003 - Error 6139 * 0x2002 - Success 6140 **/ 6141 static int 6142 lpfc_host_reset_handler(struct scsi_cmnd *cmnd) 6143 { 6144 struct Scsi_Host *shost = cmnd->device->host; 6145 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 6146 struct lpfc_hba *phba = vport->phba; 6147 int rc, ret = SUCCESS; 6148 6149 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 6150 "3172 SCSI layer issued Host Reset Data:\n"); 6151 6152 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 6153 lpfc_offline(phba); 6154 rc = lpfc_sli_brdrestart(phba); 6155 if (rc) 6156 goto error; 6157 6158 rc = lpfc_online(phba); 6159 if (rc) 6160 goto error; 6161 6162 lpfc_unblock_mgmt_io(phba); 6163 6164 return ret; 6165 error: 6166 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6167 "3323 Failed host reset\n"); 6168 lpfc_unblock_mgmt_io(phba); 6169 return FAILED; 6170 } 6171 6172 /** 6173 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point 6174 * @sdev: Pointer to scsi_device. 6175 * 6176 * This routine populates the cmds_per_lun count + 2 scsi_bufs into this host's 6177 * globally available list of scsi buffers. This routine also makes sure scsi 6178 * buffer is not allocated more than HBA limit conveyed to midlayer. This list 6179 * of scsi buffer exists for the lifetime of the driver. 6180 * 6181 * Return codes: 6182 * non-0 - Error 6183 * 0 - Success 6184 **/ 6185 static int 6186 lpfc_slave_alloc(struct scsi_device *sdev) 6187 { 6188 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 6189 struct lpfc_hba *phba = vport->phba; 6190 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 6191 uint32_t total = 0; 6192 uint32_t num_to_alloc = 0; 6193 int num_allocated = 0; 6194 uint32_t sdev_cnt; 6195 struct lpfc_device_data *device_data; 6196 unsigned long flags; 6197 struct lpfc_name target_wwpn; 6198 6199 if (!rport || fc_remote_port_chkready(rport)) 6200 return -ENXIO; 6201 6202 if (phba->cfg_fof) { 6203 6204 /* 6205 * Check to see if the device data structure for the lun 6206 * exists. If not, create one. 6207 */ 6208 6209 u64_to_wwn(rport->port_name, target_wwpn.u.wwn); 6210 spin_lock_irqsave(&phba->devicelock, flags); 6211 device_data = __lpfc_get_device_data(phba, 6212 &phba->luns, 6213 &vport->fc_portname, 6214 &target_wwpn, 6215 sdev->lun); 6216 if (!device_data) { 6217 spin_unlock_irqrestore(&phba->devicelock, flags); 6218 device_data = lpfc_create_device_data(phba, 6219 &vport->fc_portname, 6220 &target_wwpn, 6221 sdev->lun, 6222 phba->cfg_XLanePriority, 6223 true); 6224 if (!device_data) 6225 return -ENOMEM; 6226 spin_lock_irqsave(&phba->devicelock, flags); 6227 list_add_tail(&device_data->listentry, &phba->luns); 6228 } 6229 device_data->rport_data = rport->dd_data; 6230 device_data->available = true; 6231 spin_unlock_irqrestore(&phba->devicelock, flags); 6232 sdev->hostdata = device_data; 6233 } else { 6234 sdev->hostdata = rport->dd_data; 6235 } 6236 sdev_cnt = atomic_inc_return(&phba->sdev_cnt); 6237 6238 /* For SLI4, all IO buffers are pre-allocated */ 6239 if (phba->sli_rev == LPFC_SLI_REV4) 6240 return 0; 6241 6242 /* This code path is now ONLY for SLI3 adapters */ 6243 6244 /* 6245 * Populate the cmds_per_lun count scsi_bufs into this host's globally 6246 * available list of scsi buffers. 
	 * Don't allocate more than the HBA limit conveyed to the midlayer
	 * via the host structure. The formula accounts for the
	 * lun_queue_depth + error handlers + 1 extra. This list of scsi
	 * bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = vport->cfg_lun_queue_depth + 2;

	/* If allocated buffers are enough do nothing */
	if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
		return 0;

	/* Allow some exchanges to be available always to complete discovery */
	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0704 At limitation of %d preallocated "
				 "command buffers\n", total);
		return 0;
	/* Allow some exchanges to be available always to complete discovery */
	} else if (total + num_to_alloc >
		   phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0705 Allocation request of %d "
				 "command buffers will exceed max of %d. "
				 "Reducing allocation request to %d.\n",
				 num_to_alloc, phba->cfg_hba_queue_depth,
				 (phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}
	num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc);
	if (num_to_alloc != num_allocated) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0708 Allocation request of %d "
				 "command buffers did not succeed. "
				 "Allocated %d buffers.\n",
				 num_to_alloc, num_allocated);
	}
	if (num_allocated > 0)
		phba->total_scsi_bufs += num_allocated;
	return 0;
}

/**
 * lpfc_slave_configure - scsi_host_template slave_configure entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine configures the following items
 * - Tag command queuing support for @sdev if supported.
 * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
 *
 * Return codes:
 *   0 - Success
 **/
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;

	scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}

/**
 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
 * @sdev: Pointer to scsi_device.
 *
 * This routine sets the @sdev hostdata field to NULL.
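 * When OAS (cfg_fof) is enabled, the lun's device_data entry is also
 * marked unavailable and is freed when OAS is not enabled on it; both
 * updates are made under phba->devicelock.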
6322 **/ 6323 static void 6324 lpfc_slave_destroy(struct scsi_device *sdev) 6325 { 6326 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 6327 struct lpfc_hba *phba = vport->phba; 6328 unsigned long flags; 6329 struct lpfc_device_data *device_data = sdev->hostdata; 6330 6331 atomic_dec(&phba->sdev_cnt); 6332 if ((phba->cfg_fof) && (device_data)) { 6333 spin_lock_irqsave(&phba->devicelock, flags); 6334 device_data->available = false; 6335 if (!device_data->oas_enabled) 6336 lpfc_delete_device_data(phba, device_data); 6337 spin_unlock_irqrestore(&phba->devicelock, flags); 6338 } 6339 sdev->hostdata = NULL; 6340 return; 6341 } 6342 6343 /** 6344 * lpfc_create_device_data - creates and initializes device data structure for OAS 6345 * @phba: Pointer to host bus adapter structure. 6346 * @vport_wwpn: Pointer to vport's wwpn information 6347 * @target_wwpn: Pointer to target's wwpn information 6348 * @lun: Lun on target 6349 * @pri: Priority 6350 * @atomic_create: Flag to indicate if memory should be allocated using the 6351 * GFP_ATOMIC flag or not. 6352 * 6353 * This routine creates a device data structure which will contain identifying 6354 * information for the device (host wwpn, target wwpn, lun), state of OAS, 6355 * whether or not the corresponding lun is available by the system, 6356 * and pointer to the rport data. 6357 * 6358 * Return codes: 6359 * NULL - Error 6360 * Pointer to lpfc_device_data - Success 6361 **/ 6362 struct lpfc_device_data* 6363 lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, 6364 struct lpfc_name *target_wwpn, uint64_t lun, 6365 uint32_t pri, bool atomic_create) 6366 { 6367 6368 struct lpfc_device_data *lun_info; 6369 int memory_flags; 6370 6371 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 6372 !(phba->cfg_fof)) 6373 return NULL; 6374 6375 /* Attempt to create the device data to contain lun info */ 6376 6377 if (atomic_create) 6378 memory_flags = GFP_ATOMIC; 6379 else 6380 memory_flags = GFP_KERNEL; 6381 lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags); 6382 if (!lun_info) 6383 return NULL; 6384 INIT_LIST_HEAD(&lun_info->listentry); 6385 lun_info->rport_data = NULL; 6386 memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn, 6387 sizeof(struct lpfc_name)); 6388 memcpy(&lun_info->device_id.target_wwpn, target_wwpn, 6389 sizeof(struct lpfc_name)); 6390 lun_info->device_id.lun = lun; 6391 lun_info->oas_enabled = false; 6392 lun_info->priority = pri; 6393 lun_info->available = false; 6394 return lun_info; 6395 } 6396 6397 /** 6398 * lpfc_delete_device_data - frees a device data structure for OAS 6399 * @phba: Pointer to host bus adapter structure. 6400 * @lun_info: Pointer to device data structure to free. 6401 * 6402 * This routine frees the previously allocated device data structure passed. 6403 * 6404 **/ 6405 void 6406 lpfc_delete_device_data(struct lpfc_hba *phba, 6407 struct lpfc_device_data *lun_info) 6408 { 6409 6410 if (unlikely(!phba) || !lun_info || 6411 !(phba->cfg_fof)) 6412 return; 6413 6414 if (!list_empty(&lun_info->listentry)) 6415 list_del(&lun_info->listentry); 6416 mempool_free(lun_info, phba->device_data_mem_pool); 6417 return; 6418 } 6419 6420 /** 6421 * __lpfc_get_device_data - returns the device data for the specified lun 6422 * @phba: Pointer to host bus adapter structure. 6423 * @list: Point to list to search. 
6424 * @vport_wwpn: Pointer to vport's wwpn information 6425 * @target_wwpn: Pointer to target's wwpn information 6426 * @lun: Lun on target 6427 * 6428 * This routine searches the list passed for the specified lun's device data. 6429 * This function does not hold locks, it is the responsibility of the caller 6430 * to ensure the proper lock is held before calling the function. 6431 * 6432 * Return codes: 6433 * NULL - Error 6434 * Pointer to lpfc_device_data - Success 6435 **/ 6436 struct lpfc_device_data* 6437 __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list, 6438 struct lpfc_name *vport_wwpn, 6439 struct lpfc_name *target_wwpn, uint64_t lun) 6440 { 6441 6442 struct lpfc_device_data *lun_info; 6443 6444 if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn || 6445 !phba->cfg_fof) 6446 return NULL; 6447 6448 /* Check to see if the lun is already enabled for OAS. */ 6449 6450 list_for_each_entry(lun_info, list, listentry) { 6451 if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn, 6452 sizeof(struct lpfc_name)) == 0) && 6453 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn, 6454 sizeof(struct lpfc_name)) == 0) && 6455 (lun_info->device_id.lun == lun)) 6456 return lun_info; 6457 } 6458 6459 return NULL; 6460 } 6461 6462 /** 6463 * lpfc_find_next_oas_lun - searches for the next oas lun 6464 * @phba: Pointer to host bus adapter structure. 6465 * @vport_wwpn: Pointer to vport's wwpn information 6466 * @target_wwpn: Pointer to target's wwpn information 6467 * @starting_lun: Pointer to the lun to start searching for 6468 * @found_vport_wwpn: Pointer to the found lun's vport wwpn information 6469 * @found_target_wwpn: Pointer to the found lun's target wwpn information 6470 * @found_lun: Pointer to the found lun. 6471 * @found_lun_status: Pointer to status of the found lun. 6472 * @found_lun_pri: Pointer to priority of the found lun. 6473 * 6474 * This routine searches the luns list for the specified lun 6475 * or the first lun for the vport/target. If the vport wwpn contains 6476 * a zero value then a specific vport is not specified. In this case 6477 * any vport which contains the lun will be considered a match. If the 6478 * target wwpn contains a zero value then a specific target is not specified. 6479 * In this case any target which contains the lun will be considered a 6480 * match. If the lun is found, the lun, vport wwpn, target wwpn and lun status 6481 * are returned. The function will also return the next lun if available. 6482 * If the next lun is not found, starting_lun parameter will be set to 6483 * NO_MORE_OAS_LUN. 
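 *
 * Illustrative sketch only (the in-tree consumer is the OAS sysfs code in
 * lpfc_attr.c; vpt_wwpn, tgt_wwpn, the found_* locals and the helper named
 * below are made-up names): all enabled OAS luns can be walked by seeding
 * the starting lun with FIND_FIRST_OAS_LUN and looping until the routine
 * returns false, since each successful call advances *starting_lun to the
 * next lun (or to NO_MORE_OAS_LUN):
 *
 *	uint64_t lun = FIND_FIRST_OAS_LUN;
 *
 *	while (lpfc_find_next_oas_lun(phba, &vpt_wwpn, &tgt_wwpn, &lun,
 *				      &found_vpt_wwpn, &found_tgt_wwpn,
 *				      &found_lun, &found_status, &found_pri))
 *		process_found_lun(found_lun);	(hypothetical consumer)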
6484 * 6485 * Return codes: 6486 * non-0 - Error 6487 * 0 - Success 6488 **/ 6489 bool 6490 lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, 6491 struct lpfc_name *target_wwpn, uint64_t *starting_lun, 6492 struct lpfc_name *found_vport_wwpn, 6493 struct lpfc_name *found_target_wwpn, 6494 uint64_t *found_lun, 6495 uint32_t *found_lun_status, 6496 uint32_t *found_lun_pri) 6497 { 6498 6499 unsigned long flags; 6500 struct lpfc_device_data *lun_info; 6501 struct lpfc_device_id *device_id; 6502 uint64_t lun; 6503 bool found = false; 6504 6505 if (unlikely(!phba) || !vport_wwpn || !target_wwpn || 6506 !starting_lun || !found_vport_wwpn || 6507 !found_target_wwpn || !found_lun || !found_lun_status || 6508 (*starting_lun == NO_MORE_OAS_LUN) || 6509 !phba->cfg_fof) 6510 return false; 6511 6512 lun = *starting_lun; 6513 *found_lun = NO_MORE_OAS_LUN; 6514 *starting_lun = NO_MORE_OAS_LUN; 6515 6516 /* Search for lun or the lun closet in value */ 6517 6518 spin_lock_irqsave(&phba->devicelock, flags); 6519 list_for_each_entry(lun_info, &phba->luns, listentry) { 6520 if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) || 6521 (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn, 6522 sizeof(struct lpfc_name)) == 0)) && 6523 ((wwn_to_u64(target_wwpn->u.wwn) == 0) || 6524 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn, 6525 sizeof(struct lpfc_name)) == 0)) && 6526 (lun_info->oas_enabled)) { 6527 device_id = &lun_info->device_id; 6528 if ((!found) && 6529 ((lun == FIND_FIRST_OAS_LUN) || 6530 (device_id->lun == lun))) { 6531 *found_lun = device_id->lun; 6532 memcpy(found_vport_wwpn, 6533 &device_id->vport_wwpn, 6534 sizeof(struct lpfc_name)); 6535 memcpy(found_target_wwpn, 6536 &device_id->target_wwpn, 6537 sizeof(struct lpfc_name)); 6538 if (lun_info->available) 6539 *found_lun_status = 6540 OAS_LUN_STATUS_EXISTS; 6541 else 6542 *found_lun_status = 0; 6543 *found_lun_pri = lun_info->priority; 6544 if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT) 6545 memset(vport_wwpn, 0x0, 6546 sizeof(struct lpfc_name)); 6547 if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET) 6548 memset(target_wwpn, 0x0, 6549 sizeof(struct lpfc_name)); 6550 found = true; 6551 } else if (found) { 6552 *starting_lun = device_id->lun; 6553 memcpy(vport_wwpn, &device_id->vport_wwpn, 6554 sizeof(struct lpfc_name)); 6555 memcpy(target_wwpn, &device_id->target_wwpn, 6556 sizeof(struct lpfc_name)); 6557 break; 6558 } 6559 } 6560 } 6561 spin_unlock_irqrestore(&phba->devicelock, flags); 6562 return found; 6563 } 6564 6565 /** 6566 * lpfc_enable_oas_lun - enables a lun for OAS operations 6567 * @phba: Pointer to host bus adapter structure. 6568 * @vport_wwpn: Pointer to vport's wwpn information 6569 * @target_wwpn: Pointer to target's wwpn information 6570 * @lun: Lun 6571 * @pri: Priority 6572 * 6573 * This routine enables a lun for oas operations. The routines does so by 6574 * doing the following : 6575 * 6576 * 1) Checks to see if the device data for the lun has been created. 6577 * 2) If found, sets the OAS enabled flag if not set and returns. 6578 * 3) Otherwise, creates a device data structure. 6579 * 4) If successfully created, indicates the device data is for an OAS lun, 6580 * indicates the lun is not available and add to the list of luns. 
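 *
 * The lookup and any list insertion are performed under the
 * phba->devicelock spinlock, matching the locking used by slave_alloc,
 * slave_destroy and lpfc_disable_oas_lun.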
 *
 * Return codes:
 *   false - Error
 *   true - Success
 **/
bool
lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
		    struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
{

	struct lpfc_device_data *lun_info;
	unsigned long flags;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !phba->cfg_fof)
		return false;

	spin_lock_irqsave(&phba->devicelock, flags);

	/* Check to see if the device data for the lun has been created */
	lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
					  target_wwpn, lun);
	if (lun_info) {
		if (!lun_info->oas_enabled)
			lun_info->oas_enabled = true;
		lun_info->priority = pri;
		spin_unlock_irqrestore(&phba->devicelock, flags);
		return true;
	}

	/* Create a lun info structure and add it to the list of luns */
	lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
					   pri, true);
	if (lun_info) {
		lun_info->oas_enabled = true;
		lun_info->priority = pri;
		lun_info->available = false;
		list_add_tail(&lun_info->listentry, &phba->luns);
		spin_unlock_irqrestore(&phba->devicelock, flags);
		return true;
	}
	spin_unlock_irqrestore(&phba->devicelock, flags);
	return false;
}

/**
 * lpfc_disable_oas_lun - disables a lun for OAS operations
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun
 * @pri: Priority
 *
 * This routine disables a lun for oas operations. The routine does so by
 * doing the following:
 *
 * 1) Checks to see if the device data for the lun is created.
 * 2) If present, clears the flag indicating this lun is for OAS.
 * 3) If the lun is not available to the system, the device data is
 *    freed.
 *
 * Return codes:
 *   false - Error
 *   true - Success
 **/
bool
lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
		     struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
{

	struct lpfc_device_data *lun_info;
	unsigned long flags;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !phba->cfg_fof)
		return false;

	spin_lock_irqsave(&phba->devicelock, flags);

	/* Check to see if the lun is available.
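	 * If it is known, clear its OAS flag; the entry is freed below when
	 * the lun is not currently exposed to the system.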
*/ 6661 lun_info = __lpfc_get_device_data(phba, 6662 &phba->luns, vport_wwpn, 6663 target_wwpn, lun); 6664 if (lun_info) { 6665 lun_info->oas_enabled = false; 6666 lun_info->priority = pri; 6667 if (!lun_info->available) 6668 lpfc_delete_device_data(phba, lun_info); 6669 spin_unlock_irqrestore(&phba->devicelock, flags); 6670 return true; 6671 } 6672 6673 spin_unlock_irqrestore(&phba->devicelock, flags); 6674 return false; 6675 } 6676 6677 static int 6678 lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) 6679 { 6680 return SCSI_MLQUEUE_HOST_BUSY; 6681 } 6682 6683 static int 6684 lpfc_no_handler(struct scsi_cmnd *cmnd) 6685 { 6686 return FAILED; 6687 } 6688 6689 static int 6690 lpfc_no_slave(struct scsi_device *sdev) 6691 { 6692 return -ENODEV; 6693 } 6694 6695 struct scsi_host_template lpfc_template_nvme = { 6696 .module = THIS_MODULE, 6697 .name = LPFC_DRIVER_NAME, 6698 .proc_name = LPFC_DRIVER_NAME, 6699 .info = lpfc_info, 6700 .queuecommand = lpfc_no_command, 6701 .eh_abort_handler = lpfc_no_handler, 6702 .eh_device_reset_handler = lpfc_no_handler, 6703 .eh_target_reset_handler = lpfc_no_handler, 6704 .eh_bus_reset_handler = lpfc_no_handler, 6705 .eh_host_reset_handler = lpfc_no_handler, 6706 .slave_alloc = lpfc_no_slave, 6707 .slave_configure = lpfc_no_slave, 6708 .scan_finished = lpfc_scan_finished, 6709 .this_id = -1, 6710 .sg_tablesize = 1, 6711 .cmd_per_lun = 1, 6712 .shost_attrs = lpfc_hba_attrs, 6713 .max_sectors = 0xFFFFFFFF, 6714 .vendor_id = LPFC_NL_VENDOR_ID, 6715 .track_queue_depth = 0, 6716 }; 6717 6718 struct scsi_host_template lpfc_template = { 6719 .module = THIS_MODULE, 6720 .name = LPFC_DRIVER_NAME, 6721 .proc_name = LPFC_DRIVER_NAME, 6722 .info = lpfc_info, 6723 .queuecommand = lpfc_queuecommand, 6724 .eh_timed_out = fc_eh_timed_out, 6725 .eh_should_retry_cmd = fc_eh_should_retry_cmd, 6726 .eh_abort_handler = lpfc_abort_handler, 6727 .eh_device_reset_handler = lpfc_device_reset_handler, 6728 .eh_target_reset_handler = lpfc_target_reset_handler, 6729 .eh_bus_reset_handler = lpfc_bus_reset_handler, 6730 .eh_host_reset_handler = lpfc_host_reset_handler, 6731 .slave_alloc = lpfc_slave_alloc, 6732 .slave_configure = lpfc_slave_configure, 6733 .slave_destroy = lpfc_slave_destroy, 6734 .scan_finished = lpfc_scan_finished, 6735 .this_id = -1, 6736 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, 6737 .cmd_per_lun = LPFC_CMD_PER_LUN, 6738 .shost_attrs = lpfc_hba_attrs, 6739 .max_sectors = 0xFFFFFFFF, 6740 .vendor_id = LPFC_NL_VENDOR_ID, 6741 .change_queue_depth = scsi_change_queue_depth, 6742 .track_queue_depth = 1, 6743 }; 6744
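
/*
 * Note on template selection (as of this driver version): the two templates
 * above are handed to scsi_host_alloc() by lpfc_create_port() in lpfc_init.c.
 * lpfc_template is used when the port exposes FCP/SCSI; lpfc_template_nvme is
 * used for ports configured for NVMe only, where the SCSI entry points are
 * stubbed by the lpfc_no_command/lpfc_no_handler/lpfc_no_slave placeholders,
 * which simply reject or busy-out anything the midlayer might send.
 */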