/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

int _dump_buf_done;

static char *dif_op_str[] = {
	"SCSI_PROT_NORMAL",
	"SCSI_PROT_READ_INSERT",
	"SCSI_PROT_WRITE_STRIP",
	"SCSI_PROT_READ_STRIP",
	"SCSI_PROT_WRITE_INSERT",
	"SCSI_PROT_READ_PASS",
	"SCSI_PROT_WRITE_PASS",
};
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);

static void
lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_sglist(cmnd);

	if (!_dump_buf_data) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
				__func__);
		return;
	}

	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9051 BLKGRD: ERROR: data scatterlist is null\n");
		return;
	}

	dst = (void *) _dump_buf_data;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}
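
/*
 * lpfc_debug_save_dif below mirrors lpfc_debug_save_data above, but
 * snapshots the protection (DIF) scatterlist instead of the data
 * scatterlist.  Note that neither helper bounds-checks the copy: it is
 * assumed that the _dump_buf_data and _dump_buf_dif debugfs capture
 * buffers were allocated large enough for the largest I/O this driver
 * will issue.
 */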
static void
lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_prot_sglist(cmnd);

	if (!_dump_buf_dif) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9052 BLKGRD: ERROR %s _dump_buf_dif is NULL\n",
				__func__);
		return;
	}

	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9053 BLKGRD: ERROR: prot scatterlist is null\n");
		return;
	}

	dst = _dump_buf_dif;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}

/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called from the lpfc_prep_task_mgmt_cmd function to
 * set the last bit in the response sge entry.
 **/
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
				struct lpfc_scsi_buf *lpfc_cmd)
{
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
	if (sgl) {
		sgl += 1;
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}
}

/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called when there is a command completion and this
 * function updates the statistical data for the command completion.
 **/
static void
lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host *shost = cmd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	unsigned long latency;
	int i;

	if (cmd->result)
		return;

	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);

	spin_lock_irqsave(shost->host_lock, flags);
	if (!vport->stat_data_enabled ||
		vport->stat_data_blocked ||
		!pnode->lat_data ||
		(phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
			phba->bucket_step;
		/* check array subscript bounds */
		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
			if (latency <= (phba->bucket_base +
				((1<<i)*phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}
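
/*
 * A worked example of the bucket selection in lpfc_update_stats() (the
 * numbers are illustrative, not driver defaults): with bucket_base = 0 and
 * bucket_step = 10 (ms), a 25 ms completion maps to linear bucket
 * i = (25 + 10 - 1 - 0) / 10 = 3, i.e. a ceiling division of the latency
 * by the step size.  In the exponential case the same completion lands in
 * the first bucket i satisfying 25 <= 0 + (1 << i) * 10, which is i = 2.
 */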

/**
 * lpfc_send_sdev_queuedepth_change_event - Posts a queuedepth change event
 * @phba: Pointer to HBA context object.
 * @vport: Pointer to vport object.
 * @ndlp: Pointer to FC node associated with the target.
 * @lun: Lun number of the scsi device.
 * @old_val: Old value of the queue depth.
 * @new_val: New value of the queue depth.
 *
 * This function sends an event to the mgmt application indicating
 * there is a change in the scsi device queue depth.
 **/
static void
lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
		struct lpfc_vport *vport,
		struct lpfc_nodelist *ndlp,
		uint32_t lun,
		uint32_t old_val,
		uint32_t new_val)
{
	struct lpfc_fast_path_event *fast_path_evt;
	unsigned long flags;

	fast_path_evt = lpfc_alloc_fast_evt(phba);
	if (!fast_path_evt)
		return;

	fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
		FC_REG_SCSI_EVENT;
	fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
		LPFC_EVENT_VARQUEDEPTH;

	/* Report all luns with change in queue depth */
	fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
			&ndlp->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
			&ndlp->nlp_nodename, sizeof(struct lpfc_name));
	}

	fast_path_evt->un.queue_depth_evt.oldval = old_val;
	fast_path_evt->un.queue_depth_evt.newval = new_val;
	fast_path_evt->vport = vport;

	fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);

	return;
}

/**
 * lpfc_change_queue_depth - Alter scsi device queue depth
 * @sdev: Pointer to the scsi device on which to change the queue depth.
 * @qdepth: New queue depth to set the sdev to.
 * @reason: The reason for the queue depth change.
 *
 * This function is called by the midlayer and the LLD to alter the queue
 * depth for a scsi device. This function sets the queue depth to the new
 * value and sends an event out to log the queue depth change.
 **/
int
lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	unsigned long new_queue_depth, old_queue_depth;

	old_queue_depth = sdev->queue_depth;
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	new_queue_depth = sdev->queue_depth;
	rdata = sdev->hostdata;
	if (rdata)
		lpfc_send_sdev_queuedepth_change_event(phba, vport,
						       rdata->pnode, sdev->lun,
						       old_queue_depth,
						       new_queue_depth);
	return sdev->queue_depth;
}

/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is resource error in driver or firmware.
 * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine
 * posts at most 1 event each second. This routine wakes up worker thread of
 * @phba to process the WORKER_RAMP_DOWN_QUEUE event.
 *
 * This routine should be called with no lock held.
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread
 * @vport: The virtual port for which this call is being executed.
 * @queue_depth: The current queue depth of the scsi device.
 *
 * This routine posts a WORKER_RAMP_UP_QUEUE event for the @vport's HBA. It
 * posts at most 1 event every 5 minutes after last_ramp_up_time or
 * last_rsrc_error_time. This routine wakes up the worker thread of the HBA
 * to process the WORKER_RAMP_UP_QUEUE event.
 *
 * This routine should be called with no lock held.
 **/
static inline void
lpfc_rampup_queue_depth(struct lpfc_vport *vport,
			uint32_t queue_depth)
{
	unsigned long flags;
	struct lpfc_hba *phba = vport->phba;
	uint32_t evt_posted;
	atomic_inc(&phba->num_cmd_success);

	if (vport->cfg_lun_queue_depth <= queue_depth)
		return;
	spin_lock_irqsave(&phba->hbalock, flags);
	if (time_before(jiffies,
			phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) ||
	    time_before(jiffies,
			phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}
	phba->last_ramp_up_time = jiffies;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
 * worker thread. This routine reduces the queue depth for all scsi devices
 * on each vport associated with @phba.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
				lpfc_change_queue_depth(sdev, new_queue_depth,
							SCSI_QDEPTH_DEFAULT);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}
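
/*
 * A worked example of the scaling in lpfc_ramp_down_queue_handler() (the
 * numbers are illustrative): with num_rsrc_err = 2 and num_cmd_success = 6,
 * a device at queue depth 32 computes a reduction of 32 * 2 / (2 + 6) = 8
 * and is lowered to depth 24; a device whose computed reduction rounds
 * down to zero falls back to a decrement of one.
 */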

/**
 * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_UP_QUEUE event for the
 * worker thread. This routine increases the queue depth for all scsi devices
 * on each vport associated with @phba by 1. This routine also sets @phba
 * num_rsrc_err and num_cmd_success to zero.
 **/
void
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				if (vports[i]->cfg_lun_queue_depth <=
				    sdev->queue_depth)
					continue;
				lpfc_change_queue_depth(sdev,
							sdev->queue_depth+1,
							SCSI_QDEPTH_RAMP_UP);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to block state
 * by invoking fc_remote_port_delete() routine. This function is invoked
 * with EEH when device's PCI slot has been permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}
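
/*
 * A sketch of the per-command DMA buffer carved out of
 * lpfc_scsi_dma_buf_pool by lpfc_new_scsi_buf_s3() below; the offsets
 * follow directly from the pointer arithmetic in the function:
 *
 *	+-------------------+  psb->data (psb->dma_handle)
 *	| struct fcp_cmnd   |
 *	+-------------------+  + sizeof(struct fcp_cmnd)
 *	| struct fcp_rsp    |
 *	+-------------------+  + sizeof(struct fcp_rsp)
 *	| BPL entries ...   |  (ulp_bde64 slots for sg_tablesize)
 *	+-------------------+  end of cfg_sg_dma_buf_size
 */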

/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 * @vport: The virtual port for which this call being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates a scsi buffer for device with SLI-3 interface spec.
 * The scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. The non-DMAable buffer region contains information to build
 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
 * and the initial BPL. In addition to allocating memory, the FCP CMND and
 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag;
	int bcnt;

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci
		 * bus space for an I/O. The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
					GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/* Initialize virtual ptrs to dma_buf region. */
		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
					psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		psb->fcp_cmnd = psb->data;
		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
		psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/* Initialize local short-hand pointers. */
		bpl = psb->fcp_bpl;
		pdma_phys_fcp_cmd = psb->dma_handle;
		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
		pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
		 * are sg list bdes. Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

		/* Setup the physical region for the FCP RSP */
		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		if ((phba->sli_rev == 3) &&
				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
			/* fill in immediate fcp command BDE */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
					unsli3.fcp_ext.icd);
			iocb->un.fcpi64.bdl.addrHigh = 0;
			iocb->ulpBdeCount = 0;
			iocb->ulpLe = 0;
			/* fill in response BDE */
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
							BUFF_TYPE_BDE_64;
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
				sizeof(struct fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrLow =
				putPaddrLow(pdma_phys_fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrHigh =
				putPaddrHigh(pdma_phys_fcp_rsp);
		} else {
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
			iocb->un.fcpi64.bdl.bdeSize =
					(2 * sizeof(struct ulp_bde64));
			iocb->un.fcpi64.bdl.addrLow =
					putPaddrLow(pdma_phys_bpl);
			iocb->un.fcpi64.bdl.addrHigh =
					putPaddrHigh(pdma_phys_bpl);
			iocb->ulpBdeCount = 1;
			iocb->ulpLe = 1;
		}
		iocb->ulpClass = CLASS3;
		psb->status = IOSTAT_SUCCESS;
		/* Put it back into the SCSI buffer list */
		lpfc_release_scsi_buf_s3(phba, psb);

	}

	return bcnt;
}

/**
 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * FCP aborted xri.
 **/
void
lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
			  struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;
	struct lpfc_iocbq *iocbq;
	int i;

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->cur_iocbq.sli4_xritag == xri) {
			list_del(&psb->list);
			psb->exch_busy = 0;
			psb->status = IOSTAT_SUCCESS;
			spin_unlock(
				&phba->sli4_hba.abts_scsi_buf_list_lock);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			lpfc_release_scsi_buf_s4(phba, psb);
			return;
		}
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
			(iocbq->iocb_flag & LPFC_IO_LIBDFC))
			continue;
		if (iocbq->sli4_xritag != xri)
			continue;
		psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
		psb->exch_busy = 0;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return;

	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_sli4_repost_scsi_sgl_list - Repost the Scsi buffers sgl pages as block
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine walks the list of scsi buffers that have been allocated and
 * reposts them to the HBA by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
 *
 * Returns: 0 = success, non-zero failure.
 **/
int
lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb;
	int index, status, bcnt = 0, rcnt = 0, rc = 0;
	LIST_HEAD(sblist);

	for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) {
		psb = phba->sli4_hba.lpfc_scsi_psb_array[index];
		if (psb) {
			/* Remove from SCSI buffer list */
			list_del(&psb->list);
			/* Add it to a local SCSI buffer list */
			list_add_tail(&psb->list, &sblist);
			if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				bcnt = rcnt;
				rcnt = 0;
			}
		} else
			/* A hole present in the XRI array, need to skip */
			bcnt = rcnt;

		if (index == phba->sli4_hba.scsi_xri_cnt - 1)
			/* End of XRI array for SCSI buffer, complete */
			bcnt = rcnt;

		/* Continue until collect up to a nembed page worth of sgls */
		if (bcnt == 0)
			continue;
		/* Now, post the SCSI buffer list sgls as a block */
		status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
		/* Reset SCSI buffer count for next round of posting */
		bcnt = 0;
		while (!list_empty(&sblist)) {
			list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
					 list);
			if (status) {
				/* Put this back on the abort scsi list */
				psb->exch_busy = 1;
				rc++;
			} else {
				psb->exch_busy = 0;
				psb->status = IOSTAT_SUCCESS;
			}
			/* Put it back into the SCSI buffer list */
			lpfc_release_scsi_buf_s4(phba, psb);
		}
	}
	return rc;
}
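
/*
 * The SLI-4 buffer built by lpfc_new_scsi_buf_s4() below is laid out the
 * other way around from its SLI-3 counterpart: the SGL sits at the start
 * of the DMA buffer and the FCP command and response are packed at the
 * very end (again, this mirrors the pointer arithmetic in the function):
 *
 *	+-------------------+  psb->data (psb->dma_handle)
 *	| SGL entries ...   |
 *	+-------------------+  + cfg_sg_dma_buf_size
 *	| struct fcp_cmnd   |    - sizeof(fcp_cmnd) - sizeof(fcp_rsp)
 *	+-------------------+
 *	| struct fcp_rsp    |
 *	+-------------------+  end of cfg_sg_dma_buf_size
 */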

/**
 * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
 * @vport: The virtual port for which this call being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates a scsi buffer for device with SLI-4 interface spec.
 * The scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct sli4_sge *sgl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
	uint16_t iotag, last_xritag = NO_XRI;
	int status = 0, index;
	int bcnt;
	int non_sequential_xri = 0;
	int rc = 0;
	LIST_HEAD(sblist);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci bus
		 * space for an I/O. The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
						GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/* Initialize virtual ptrs to dma_buf region. */
		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}

		psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba);
		if (psb->cur_iocbq.sli4_xritag == NO_XRI) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		if (last_xritag != NO_XRI
			&& psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
			non_sequential_xri = 1;
		} else
			list_add_tail(&psb->list, &sblist);
		last_xritag = psb->cur_iocbq.sli4_xritag;

		index = phba->sli4_hba.scsi_xri_cnt++;
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		psb->fcp_bpl = psb->data;
		psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
			- (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
		psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
					sizeof(struct fcp_cmnd));

		/* Initialize local short-hand pointers. */
		sgl = (struct sli4_sge *)psb->fcp_bpl;
		pdma_phys_bpl = psb->dma_handle;
		pdma_phys_fcp_cmd =
			(psb->dma_handle + phba->cfg_sg_dma_buf_size)
			 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
		pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
		 * are sg list bdes. Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
		sgl++;

		/* Setup the physical region for the FCP RSP */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
		/*
		 * Setting the BLP size to 2 * sizeof BDE may not be correct.
		 * We are setting the bpl to point to our sgl. An sgl's
		 * entries are 16 bytes, a bpl's entries are 12 bytes.
		 */
		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
		iocb->ulpBdeCount = 1;
		iocb->ulpLe = 1;
		iocb->ulpClass = CLASS3;
		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
			pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
		else
			pdma_phys_bpl1 = 0;
		psb->dma_phys_bpl = pdma_phys_bpl;
		phba->sli4_hba.lpfc_scsi_psb_array[index] = psb;
		if (non_sequential_xri) {
			status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl,
						pdma_phys_bpl1,
						psb->cur_iocbq.sli4_xritag);
			if (status) {
				/* Put this back on the abort scsi list */
				psb->exch_busy = 1;
				rc++;
			} else {
				psb->exch_busy = 0;
				psb->status = IOSTAT_SUCCESS;
			}
			/* Put it back into the SCSI buffer list */
			lpfc_release_scsi_buf_s4(phba, psb);
			break;
		}
	}
	if (bcnt) {
		status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
		/* Reset SCSI buffer count for next round of posting */
		while (!list_empty(&sblist)) {
			list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
					 list);
			if (status) {
				/* Put this back on the abort scsi list */
				psb->exch_busy = 1;
				rc++;
			} else {
				psb->exch_busy = 0;
				psb->status = IOSTAT_SUCCESS;
			}
			/* Put it back into the SCSI buffer list */
			lpfc_release_scsi_buf_s4(phba, psb);
		}
	}

	return bcnt + non_sequential_xri - rc;
}
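
/*
 * A note on the non_sequential_xri handling above: buffers whose XRIs come
 * back contiguous are collected on sblist and posted to the port in one
 * block by lpfc_sli4_post_scsi_sgl_block().  The first gap in the XRI
 * sequence instead forces that buffer to be posted individually through
 * lpfc_sli4_post_sgl() and ends the allocation loop early.
 */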

/**
 * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
 * @vport: The virtual port for which this call being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine wraps the actual SCSI buffer allocator function pointer from
 * the lpfc_hba struct.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static inline int
lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
{
	return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
}

/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list
 * list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd) {
		lpfc_cmd->seg_cnt = 0;
		lpfc_cmd->nonsg_phys = 0;
		lpfc_cmd->prot_seg_cnt = 0;
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return lpfc_cmd;
}
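
/*
 * Typical buffer life cycle, as a sketch only (the real callers are the
 * queuecommand and completion paths elsewhere in this file):
 *
 *	lpfc_cmd = lpfc_get_scsi_buf(phba);
 *	if (!lpfc_cmd)
 *		return SCSI_MLQUEUE_HOST_BUSY;	(and ramp the queue down)
 *	... build the FCP_CMND and IOCB, then issue the I/O ...
 *	lpfc_release_scsi_buf(phba, lpfc_cmd);	(on completion or error)
 */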

/**
 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	psb->pCmd = NULL;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}

/**
 * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list. For SLI4, XRIs are tied to the scsi buffer
 * and cannot be reused for at least RA_TOV amount of time if it was
 * aborted.
 **/
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	if (psb->exch_busy) {
		spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
		psb->pCmd = NULL;
		list_add_tail(&psb->list,
			&phba->sli4_hba.lpfc_abts_scsi_buf_list);
		spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
	} else {

		spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
		psb->pCmd = NULL;
		list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	}
}

/**
 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{

	phba->lpfc_release_scsi_buf(phba, psb);
}
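
/*
 * lpfc_scsi_prep_dma_buf_s3() below exploits the SLI-3 extended IOCB:
 * when the adapter is not running BlockGuard or DSS, up to
 * LPFC_EXT_DATA_BDE_COUNT (3) data BDEs are placed directly in the IOCB's
 * unsli3.fcp_ext area, sparing the hardware a BPL fetch; larger
 * scatterlists fall back to the BPL that already carries the FCP_CMND and
 * FCP_RSP entries.
 */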

/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
 * through sg elements and formats the bdes. This routine also initializes all
 * IOCB fields which are dependent on scsi command request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither. Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages. They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9064 BLKGRD: %s: Too many sg segments from "
				"dma_map_sg. Config %d, seg_cnt %d\n",
				__func__, phba->cfg_sg_seg_cnt,
				lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command. Just run through the seg_cnt and format
		 * the bde's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a BPL.
			 * This I/O has more than 3 BDE so the 1st data bde will
			 * be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	return 0;
}

/*
 * Given a scsi cmnd, determine the BlockGuard opcodes to be used with it
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 *
 */
static int
lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint8_t *txop, uint8_t *rxop)
{
	uint8_t guard_type = scsi_host_get_guard(sc->device->host);
	uint8_t ret = 0;

	if (guard_type == SHOST_DIX_GUARD_IP) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*txop = BG_OP_IN_CSUM_OUT_CRC;
			*rxop = BG_OP_IN_CRC_OUT_CSUM;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9063 BLKGRD: Bad op/guard:%d/%d combination\n",
					scsi_get_prot_op(sc), guard_type);
			ret = 1;
			break;

		}
	} else if (guard_type == SHOST_DIX_GUARD_CRC) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*txop = BG_OP_IN_CRC_OUT_CRC;
			*rxop = BG_OP_IN_CRC_OUT_CRC;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9075 BLKGRD: Bad op/guard:%d/%d combination\n",
					scsi_get_prot_op(sc), guard_type);
			ret = 1;
			break;
		}
	} else {
		/* unsupported format */
		BUG();
	}

	return ret;
}

struct scsi_dif_tuple {
	__be16 guard_tag;	/* Checksum */
	__be16 app_tag;		/* Opaque storage */
	__be32 ref_tag;		/* Target LBA or indirect LBA */
};

static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
{
	return sc->device->sector_size;
}
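
/*
 * Layout example for one scsi_dif_tuple guarding a 512-byte sector at
 * LBA 0x12345678 under DIF Type 1: guard_tag carries the block's CRC (or
 * IP checksum, per the DIX guard type negotiated with the midlayer),
 * app_tag is opaque to the protection scheme, and ref_tag holds the low
 * 32 bits of the LBA, i.e. 0x12345678.  One such 8-byte tuple accompanies
 * every logical block of the transfer.
 */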

/**
 * lpfc_get_cmd_dif_parms - Extract DIF parameters from SCSI command
 * @sc:         in: SCSI command
 * @apptagmask: out: app tag mask
 * @apptagval:  out: app tag value
 * @reftag:     out: ref tag (reference tag)
 *
 * Description:
 *   Extract DIF parameters from the command if possible. Otherwise,
 *   use default parameters.
 *
 **/
static inline void
lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask,
		uint16_t *apptagval, uint32_t *reftag)
{
	struct scsi_dif_tuple *spt;
	unsigned char op = scsi_get_prot_op(sc);
	unsigned int protcnt = scsi_prot_sg_count(sc);
	static int cnt;

	if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
			op == SCSI_PROT_WRITE_PASS)) {

		cnt++;
		spt = page_address(sg_page(scsi_prot_sglist(sc))) +
			scsi_prot_sglist(sc)[0].offset;
		*apptagmask = 0;
		*apptagval = 0;
		*reftag = be32_to_cpu(spt->ref_tag);

	} else {
		/* SBC defines ref tag to be lower 32bits of LBA */
		*reftag = (uint32_t) (0xffffffff & scsi_get_lba(sc));
		*apptagmask = 0;
		*apptagval = 0;
	}
}

/*
 * This function sets up buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into data stream (or strip DIF from
 * incoming data stream)
 *
 * The buffer list consists of just one protection group described
 * below:
 *				+-------------------------+
 *   start of prot group  -->	|          PDE_5          |
 *				+-------------------------+
 *				|          PDE_6          |
 *				+-------------------------+
 *				|         Data BDE        |
 *				+-------------------------+
 *				|more Data BDE's ... (opt)|
 *				+-------------------------+
 *
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 *
 * Note: Data s/g buffers have been dma mapped
 */
static int
lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	dma_addr_t physaddr;
	int i = 0, num_bde = 0, status;
	int datadir = sc->sc_data_direction;
	unsigned blksize;
	uint32_t reftag;
	uint16_t apptagmask, apptagval;
	uint8_t txop, rxop;

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde*/
	blksize = lpfc_cmd_blksize(sc);
	lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);

	/* setup PDE5 with what we have */
	pde5 = (struct lpfc_pde5 *) bpl;
	memset(pde5, 0, sizeof(struct lpfc_pde5));
	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
	pde5->reftag = reftag;

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;
	pde6 = (struct lpfc_pde6 *) bpl;

	/* setup PDE6 with the rest of the info */
	memset(pde6, 0, sizeof(struct lpfc_pde6));
	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
	bf_set(pde6_optx, pde6, txop);
	bf_set(pde6_oprx, pde6, rxop);
	if (datadir == DMA_FROM_DEVICE) {
		bf_set(pde6_ce, pde6, 1);
		bf_set(pde6_re, pde6, 1);
		bf_set(pde6_ae, pde6, 1);
	}
	bf_set(pde6_ai, pde6, 1);
	bf_set(pde6_apptagval, pde6, apptagval);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = sg_dma_len(sgde);
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
		num_bde++;
	}

out:
	return num_bde;
}
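
/*
 * Example: a WRITE_INSERT (host supplies data without DIF and the HBA
 * generates and inserts the tuples) whose payload mapped to four DMA
 * segments produces a buffer list of PDE5 + PDE6 + 4 data BDEs, so
 * lpfc_bg_setup_bpl() returns num_bde = 6.
 */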

/*
 * This function sets up buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF_BUF
 *
 * This is usually used when DIFs are in their own buffers,
 * separate from the data. The HBA can then be instructed
 * to place the DIFs in the outgoing stream. For read operations,
 * the HBA could extract the DIFs and place them in DIF buffers.
 *
 * The buffer list for this type consists of one or more of the
 * protection groups described below:
 *					+-------------------------+
 *   start of first prot group  -->	|          PDE_5          |
 *					+-------------------------+
 *					|          PDE_6          |
 *					+-------------------------+
 *					|    PDE_7 (Prot BDE)     |
 *					+-------------------------+
 *					|        Data BDE         |
 *					+-------------------------+
 *					|more Data BDE's ... (opt)|
 *					+-------------------------+
 *   start of new prot group  -->	|          PDE_5          |
 *					+-------------------------+
 *					|          ...            |
 *					+-------------------------+
 *
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segment of protection data that have been dma mapped
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       mapped for DMA
 */
static int
lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	struct ulp_bde64 *prot_bde = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset, protgroup_len;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	int datadir = sc->sc_data_direction;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
	uint32_t reftag;
	uint16_t apptagmask, apptagval;
	uint8_t txop, rxop;
	int num_bde = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);

	split_offset = 0;
	do {
		/* setup PDE5 with what we have */
		pde5 = (struct lpfc_pde5 *) bpl;
		memset(pde5, 0, sizeof(struct lpfc_pde5));
		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
		pde5->reftag = reftag;

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;
		pde6 = (struct lpfc_pde6 *) bpl;

		/* setup PDE6 with the rest of the info */
		memset(pde6, 0, sizeof(struct lpfc_pde6));
		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
		bf_set(pde6_optx, pde6, txop);
		bf_set(pde6_oprx, pde6, rxop);
		bf_set(pde6_ce, pde6, 1);
		bf_set(pde6_re, pde6, 1);
		bf_set(pde6_ae, pde6, 1);
		bf_set(pde6_ai, pde6, 1);
		bf_set(pde6_apptagval, pde6, apptagval);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;

		/* setup the first BDE that points to protection buffer */
		prot_bde = (struct ulp_bde64 *) bpl;
		protphysaddr = sg_dma_address(sgpe);
		prot_bde->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
		prot_bde->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
		protgroup_len = sg_dma_len(sgpe);

		/* must be an integer multiple of the 8-byte DIF tuple */
		BUG_ON(protgroup_len % 8);

		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		prot_bde->tus.f.bdeSize = protgroup_len;
		prot_bde->tus.f.bdeFlags = LPFC_PDE7_DESCRIPTOR;
		prot_bde->tus.w = le32_to_cpu(prot_bde->tus.w);

		curr_prot++;
		num_bde++;

		/* setup BDE's for data blocks associated with DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */
		while (!pgdone) {
			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9065 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			bpl++;
			dataphysaddr = sg_dma_address(sgde) + split_offset;
			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole buffer */
				bpl->tus.f.bdeSize = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this buffer with next prot grp */
				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
				split_offset += bpl->tus.f.bdeSize;
			}

			subtotal += bpl->tus.f.bdeSize;

			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			else
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);

			num_bde++;
			curr_data++;

			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);

		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
			bpl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9054 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);

out:

	return num_bde;
}
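
/*
 * A worked example of the splitting loop above: with 512-byte logical
 * blocks, a 64-byte protection segment carries 64 / 8 = 8 tuples, so
 * protgrp_bytes = 8 * 512 = 4096.  Two 2048-byte data segments are then
 * consumed whole to close the group, while a data segment that straddles
 * the 4096-byte boundary is split: its BDE covers protgrp_bytes - subtotal
 * bytes and the remainder is carried into the next protection group via
 * split_offset.
 */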

/*
 * Given a SCSI command that supports DIF, determine composition of protection
 * groups involved in setting up buffer lists
 *
 * Returns:
 *	LPFC_PG_TYPE_NO_DIF or LPFC_PG_TYPE_DIF_BUF for supported DIF
 *	operations (for both read and write), LPFC_PG_TYPE_INVALID otherwise
 */
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
{
	int ret = LPFC_PG_TYPE_INVALID;
	unsigned char op = scsi_get_prot_op(sc);

	switch (op) {
	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		ret = LPFC_PG_TYPE_NO_DIF;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		ret = LPFC_PG_TYPE_DIF_BUF;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9021 Unsupported protection op:%d\n", op);
		break;
	}

	return ret;
}

/*
 * This is the protection/DIF aware version of
 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
 * two functions eventually, but for now, it's here
 */
static int
lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
		struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	uint32_t num_bde = 0;
	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
	int prot_group_type = 0;
	int diflen, fcpdl;
	unsigned blksize;

	/*
	 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
	 * fcp_rsp regions to the first data bde entry
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages. They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */
		datasegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_sglist(scsi_cmnd),
					scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!datasegcnt))
			return 1;

		lpfc_cmd->seg_cnt = datasegcnt;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9067 BLKGRD: %s: Too many sg segments"
					" from dma_map_sg. Config %d, seg_cnt"
					" %d\n",
					__func__, phba->cfg_sg_seg_cnt,
					lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);

		switch (prot_group_type) {
		case LPFC_PG_TYPE_NO_DIF:
			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
					datasegcnt);
			/* we should have 2 or more entries in buffer list */
			if (num_bde < 2)
				goto err;
			break;
		case LPFC_PG_TYPE_DIF_BUF:{
			/*
			 * This type indicates that protection buffers are
			 * passed to the driver, so that needs to be prepared
			 * for DMA
			 */
			protsegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_prot_sglist(scsi_cmnd),
					scsi_prot_sg_count(scsi_cmnd), datadir);
			if (unlikely(!protsegcnt)) {
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			lpfc_cmd->prot_seg_cnt = protsegcnt;
			if (lpfc_cmd->prot_seg_cnt
			    > phba->cfg_prot_sg_seg_cnt) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9068 BLKGRD: %s: Too many prot sg "
					"segments from dma_map_sg. Config %d, "
					"prot_seg_cnt %d\n", __func__,
					phba->cfg_prot_sg_seg_cnt,
					lpfc_cmd->prot_seg_cnt);
				dma_unmap_sg(&phba->pcidev->dev,
					     scsi_prot_sglist(scsi_cmnd),
					     scsi_prot_sg_count(scsi_cmnd),
					     datadir);
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
					datasegcnt, protsegcnt);
			/* we should have 3 or more entries in buffer list */
			if (num_bde < 3)
				goto err;
			break;
		}
		case LPFC_PG_TYPE_INVALID:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"9022 Unexpected protection group %i\n",
					prot_group_type);
			return 1;
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
	 * reinitialized since all iocb memory resources are used many times
	 * for transmit, receive, and continuation bpl's.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;

	fcpdl = scsi_bufflen(scsi_cmnd);

	if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
		/*
		 * We are in DIF Type 1 mode
		 * Every data block has an 8 byte DIF (trailer)
		 * attached to it. Must adjust FCP data length
		 */
		blksize = lpfc_cmd_blksize(scsi_cmnd);
		diflen = (fcpdl / blksize) * 8;
		fcpdl += diflen;
	}
	fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;

	return 0;
err:
	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"9023 Could not setup all needed BDE's "
			"prot_group_type=%d, num_bde=%d\n",
			prot_group_type, num_bde);
	return 1;
}
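
/*
 * Example of the fcpdl adjustment above: a 4096-byte Type 1 write with
 * 512-byte blocks carries 4096 / 512 = 8 DIF tuples of 8 bytes each, so
 * the wire-level FCP data length becomes 4096 + 64 = 4160 bytes.
 */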
/*
 * This function checks for BlockGuard errors detected by
 * the HBA.  In case of errors, the ASC/ASCQ fields in the
 * sense buffer will be set accordingly, paired with
 * ILLEGAL_REQUEST to signal to the kernel that the HBA
 * detected corruption.
 *
 * Returns:
 *  0 - No error found
 *  1 - BlockGuard error found
 * -1 - Internal error (bad profile, ...etc)
 */
static int
lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
			struct lpfc_iocbq *pIocbOut)
{
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
	int ret = 0;
	uint32_t bghm = bgf->bghm;
	uint32_t bgstat = bgf->bgstat;
	uint64_t failing_sector = 0;

	lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd"
			" 0x%x lba 0x%llx blk cnt 0x%x "
			"bgstat=0x%x bghm=0x%x\n",
			cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
			blk_rq_sectors(cmd->request), bgstat, bghm);

	spin_lock(&_dump_buf_lock);
	if (!_dump_buf_done) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving"
			" Data for %u blocks to debugfs\n",
			(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
		lpfc_debug_save_data(phba, cmd);

		/* If we have a prot sgl, save the DIF buffer */
		if (lpfc_prot_group_type(phba, cmd) == LPFC_PG_TYPE_DIF_BUF) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
				"Saving DIF for %u blocks to debugfs\n",
				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
			lpfc_debug_save_dif(phba, cmd);
		}

		_dump_buf_done = 1;
	}
	spin_unlock(&_dump_buf_lock);

	if (lpfc_bgs_get_invalid_prof(bgstat)) {
		cmd->result = ScsiResult(DID_ERROR, 0);
		lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid"
			" BlockGuard profile. bgstat:0x%x\n",
			bgstat);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
		cmd->result = ScsiResult(DID_ERROR, 0);
		lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: "
			"Invalid BlockGuard DIF Block. bgstat:0x%x\n",
			bgstat);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_guard_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x1);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9055 BLKGRD: guard_tag error\n");
	}

	if (lpfc_bgs_get_reftag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x3);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9056 BLKGRD: ref_tag error\n");
	}

	if (lpfc_bgs_get_apptag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x2);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9061 BLKGRD: app_tag error\n");
	}

	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
		/*
		 * setup sense data descriptor 0 per SPC-4 as an information
		 * field, and put the failing LBA in it
		 */
		cmd->sense_buffer[8] = 0;	/* Information descriptor type */
		cmd->sense_buffer[9] = 0xa;	/* Additional descriptor length */
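		/*
		 * With descriptor-format sense data (scsi_build_sense_buffer
		 * was called above with desc == 1), the descriptors start at
		 * byte 8: byte 8 is the descriptor type (0x00 == Information),
		 * byte 9 the additional length, and the 8-byte big-endian
		 * failing LBA computed below is stored in the bytes following.
		 */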
		bghm /= cmd->device->sector_size;

		failing_sector = scsi_get_lba(cmd);
		failing_sector += bghm;

		put_unaligned_be64(failing_sector, &cmd->sense_buffer[10]);
	}

	if (!ret) {
		/* No error was reported - problem in FW? */
		cmd->result = ScsiResult(DID_ERROR, 0);
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9057 BLKGRD: no errors reported!\n");
	}

out:
	return ret;
}
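/*
 * Summary of the sense data built above: all three tag errors use sense key
 * ILLEGAL_REQUEST with ASC 0x10, and the ASCQ distinguishes the cause
 * (0x1 guard tag, 0x2 application tag, 0x3 reference tag), matching the
 * T10 protection information convention.
 */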
/**
 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-4 interface spec.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	int nseg;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the sgl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data sge entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from dma_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */

		nseg = scsi_dma_map(scsi_cmnd);
		/* scsi_dma_map() returns -ENOMEM on mapping failure */
		if (unlikely(nseg <= 0))
			return 1;
		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl += 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
				" %s: Too many sg segments from "
				"dma_map_sg.  Config %d, seg_cnt %d\n",
				__func__, phba->cfg_sg_seg_cnt,
				lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment
		 * count during probe that limits the number of sg elements
		 * in any single scsi command.  Just run through the seg_cnt
		 * and format the sge's.  Unlike the SLI-3 path, there is no
		 * attempt to fit BDEs into the IOCB itself; the SGEs always
		 * live in the external sgl.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			dma_len = sg_dma_len(sgel);
			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
			if ((num_bde + 1) == nseg)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(dma_len);
			dma_offset += dma_len;
			sgl++;
		}
	} else {
		sgl += 1;
		/* set the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  The fcpDl and fcpi_parm fields are
	 * explicitly reinitialized since all iocb memory resources are
	 * reused.
	 */
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of the IOCB here.
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	return 0;
}
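/*
 * Layout of the sgl built above, for a command with two data segments:
 *   sgl[0] - FCP_CMND, sgl[1] - FCP_RSP (last flag clear),
 *   sgl[2] - first data SGE, sgl[3] - final data SGE (last flag set).
 * The 'last' bit marks where the scatter-gather list ends for the hardware.
 */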
/**
 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine wraps the actual DMA mapping function pointer from the
 * lpfc_hba struct.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static inline int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
}

/**
 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
 * @phba: Pointer to hba context object.
 * @vport: Pointer to vport object.
 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
 * @rsp_iocb: Pointer to response iocb object which reported error.
 *
 * This function posts an event when there is a SCSI command reporting
 * error from the scsi device.
 **/
static void
lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
		struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	struct lpfc_fast_path_event *fast_path_evt = NULL;
	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
	unsigned long flags;

	/* If there is queuefull or busy condition send a scsi event */
	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
	    (cmnd->result == SAM_STAT_BUSY)) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.scsi_evt.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.scsi_evt.subcategory =
			(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
			LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
		((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
			LPFC_EVENT_CHECK_COND;
		fast_path_evt->un.check_cond_evt.scsi_event.lun =
			cmnd->device->lun;
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.check_cond_evt.sense_key =
			cmnd->sense_buffer[2] & 0xf;
		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		     fcpi_parm &&
		     ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
			((scsi_status == SAM_STAT_GOOD) &&
			!(resp_info & (RESID_UNDER | RESID_OVER))))) {
		/*
		 * If status is good or resid does not match with fcp_param
		 * and there is valid fcpi_parm, then there is a read_check
		 * error
		 */
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.read_check_error.header.event_type =
			FC_REG_FABRIC_EVENT;
		fast_path_evt->un.read_check_error.header.subcategory =
			LPFC_EVENT_FCPRDCHKERR;
		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
		fast_path_evt->un.read_check_error.fcpiparam =
			fcpi_parm;
	} else
		return;

	fast_path_evt->vport = vport;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
 * @phba: The HBA for which this call is being executed.
 * @psb: The scsi buffer which is going to be un-mapped.
 *
 * This routine does DMA un-mapping of scatter gather list of scsi command
 * field of @psb.
 **/
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	/*
	 * There are only two special cases to consider.  (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg.  There is a third
	 * case, but it does not require resource deallocation.
	 */
	if (psb->seg_cnt > 0)
		scsi_dma_unmap(psb->pCmd);
	if (psb->prot_seg_cnt > 0)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
				scsi_prot_sg_count(psb->pCmd),
				psb->pCmd->sc_data_direction);
}
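/*
 * Note the symmetry with the prep routines above: the data scatterlist
 * mapped via scsi_dma_map()/dma_map_sg() is undone with scsi_dma_unmap(),
 * and the protection scatterlist mapped in lpfc_bg_scsi_prep_dma_buf() is
 * undone with dma_unmap_sg(), with the stored segment counts acting as
 * "was it mapped" flags.
 */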
/**
 * lpfc_handle_fcp_err - FCP response handler
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 * @rsp_iocb: The response IOCB which contains FCP error.
 *
 * This routine is called to process response IOCB with status field
 * IOSTAT_FCP_RSP_ERROR.  This routine sets result field of scsi command
 * based upon SCSI and FCP error.
 **/
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_iocbq *rsp_iocb)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t *lp;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;


	/*
	 * If this is a task management command, there is no
	 * scsi packet associated with this lpfc_cmd.  The driver
	 * consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "2719 Invalid response length: "
				 "tgt x%x lun x%x cmnd x%x rsplen x%x\n",
				 cmnd->device->id,
				 cmnd->device->lun, cmnd->cmnd[0],
				 rsplen);
			host_status = DID_ERROR;
			goto out;
		}
		if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "2757 Protocol failure detected during "
				 "processing of FCP I/O op: "
				 "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n",
				 cmnd->device->id,
				 cmnd->device->lun, cmnd->cmnd[0],
				 fcprsp->rspInfo3);
			host_status = DID_ERROR;
			goto out;
		}
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		if (resp_info & RSP_LEN_VALID)
			rsplen = be32_to_cpu(fcprsp->rspRspLen);
		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}
	lp = (uint32_t *)cmnd->sense_buffer;

	if (!scsi_status && (resp_info & RESID_UNDER))
		logit = LOG_FCP;

	lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "9024 FCP command x%x failed: x%x SNS x%x x%x "
			 "Data: x%x x%x x%x x%x x%x\n",
			 cmnd->cmnd[0], scsi_status,
			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
			 be32_to_cpu(fcprsp->rspResId),
			 be32_to_cpu(fcprsp->rspSnsLen),
			 be32_to_cpu(fcprsp->rspRspLen),
			 fcprsp->rspInfo3);

	scsi_set_resid(cmnd, 0);
	if (resp_info & RESID_UNDER) {
		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "9025 FCP Read Underrun, expected %d, "
				 "residual %d Data: x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
				 cmnd->underflow);

		/*
		 * If there is an underrun, check whether the underrun
		 * reported by the storage array matches the underrun
		 * reported by the HBA.  If they differ, a frame was dropped.
		 */
		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		    fcpi_parm &&
		    (scsi_get_resid(cmnd) != fcpi_parm)) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_FCP | LOG_FCP_ERROR,
					 "9026 FCP Read Check Error "
					 "and Underrun Data: x%x x%x x%x x%x\n",
					 be32_to_cpu(fcpcmd->fcpDl),
					 scsi_get_resid(cmnd), fcpi_parm,
					 cmnd->cmnd[0]);
			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
			host_status = DID_ERROR;
		}
		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is
		 * at least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
		     < cmnd->underflow)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
					 "9027 FCP command x%x residual "
					 "underrun converted to error "
					 "Data: x%x x%x x%x\n",
					 cmnd->cmnd[0], scsi_bufflen(cmnd),
					 scsi_get_resid(cmnd), cmnd->underflow);
			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "9028 FCP command x%x residual overrun error. "
				 "Data: x%x x%x\n", cmnd->cmnd[0],
				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero).  Apply check only to reads.
	 */
	} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
			(cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
				 "9029 FCP Read Check Error Data: "
				 "x%x x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 be32_to_cpu(fcprsp->rspResId),
				 fcpi_parm, cmnd->cmnd[0]);
		host_status = DID_ERROR;
		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
}
/**
 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
 * @phba: The Hba for which this call is being executed.
 * @pIocbIn: The command IOCBQ for the scsi cmnd.
 * @pIocbOut: The response IOCBQ for the scsi cmnd.
 *
 * This routine assigns scsi command result by looking into response IOCB
 * status field appropriately.  This routine handles QUEUE FULL condition as
 * well by ramping down device queue depth.
 **/
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_vport *vport = pIocbIn->vport;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	int result;
	struct scsi_device *tmp_sdev;
	int depth;
	unsigned long flags;
	struct lpfc_fast_path_event *fast_path_evt;
	struct Scsi_Host *shost = cmd->device->host;
	uint32_t queue_depth, scsi_id;

	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
	/* pick up SLI4 exchange busy status from HBA */
	lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;

	if (pnode && NLP_CHK_NODE_ACT(pnode))
		atomic_dec(&pnode->cmd_pending);

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;

		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "9030 FCP cmd x%x failed <%d/%d> "
				 "status: x%x result: x%x Data: x%x x%x\n",
				 cmd->cmnd[0],
				 cmd->device ? cmd->device->id : 0xffff,
				 cmd->device ? cmd->device->lun : 0xffff,
				 lpfc_cmd->status, lpfc_cmd->result,
				 pIocbOut->iocb.ulpContext,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
			fast_path_evt = lpfc_alloc_fast_evt(phba);
			if (!fast_path_evt)
				break;
			fast_path_evt->un.fabric_evt.event_type =
				FC_REG_FABRIC_EVENT;
			fast_path_evt->un.fabric_evt.subcategory =
				(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
				LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
			if (pnode && NLP_CHK_NODE_ACT(pnode)) {
				memcpy(&fast_path_evt->un.fabric_evt.wwpn,
					&pnode->nlp_portname,
					sizeof(struct lpfc_name));
				memcpy(&fast_path_evt->un.fabric_evt.wwnn,
					&pnode->nlp_nodename,
					sizeof(struct lpfc_name));
			}
			fast_path_evt->vport = vport;
			fast_path_evt->work_evt.evt =
				LPFC_EVT_FASTPATH_MGMT_EVT;
			spin_lock_irqsave(&phba->hbalock, flags);
			list_add_tail(&fast_path_evt->work_evt.evt_listp,
				&phba->work_list);
			spin_unlock_irqrestore(&phba->hbalock, flags);
			lpfc_worker_wake_up(phba);
			break;
		case IOSTAT_LOCAL_REJECT:
			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
			    lpfc_cmd->result == IOERR_ABORT_REQUESTED) {
				cmd->result = ScsiResult(DID_REQUEUE, 0);
				break;
			}

			if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
			     lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
			    pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
				if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
					/*
					 * This is a response for a BG enabled
					 * cmd.  Parse BG error.
					 */
					lpfc_parse_bg_err(phba, lpfc_cmd,
						pIocbOut);
					break;
				} else {
					lpfc_printf_vlog(vport, KERN_WARNING,
							LOG_BG,
							"9031 non-zero BGSTAT "
							"on unprotected cmd\n");
				}
			}

			/* else: fall through */
		default:
			cmd->result = ScsiResult(DID_ERROR, 0);
			break;
		}

		if (!pnode || !NLP_CHK_NODE_ACT(pnode)
		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
						 SAM_STAT_BUSY);
	} else {
		cmd->result = ScsiResult(DID_OK, 0);
	}

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0710 Iodone <%d/%d> cmd %p, error "
				 "x%x SNS x%x x%x Data: x%x x%x\n",
				 cmd->device->id, cmd->device->lun, cmd,
				 cmd->result, *lp, *(lp + 3), cmd->retries,
				 scsi_get_resid(cmd));
	}

	lpfc_update_stats(phba, lpfc_cmd);
	result = cmd->result;
	if (vport->cfg_max_scsicmpl_time &&
	    time_after(jiffies, lpfc_cmd->start_time +
		msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
		spin_lock_irqsave(shost->host_lock, flags);
		if (pnode && NLP_CHK_NODE_ACT(pnode)) {
			if (pnode->cmd_qdepth >
				atomic_read(&pnode->cmd_pending) &&
				(atomic_read(&pnode->cmd_pending) >
				LPFC_MIN_TGT_QDEPTH) &&
				((cmd->cmnd[0] == READ_10) ||
				(cmd->cmnd[0] == WRITE_10)))
				pnode->cmd_qdepth =
					atomic_read(&pnode->cmd_pending);

			pnode->last_change_time = jiffies;
		}
		spin_unlock_irqrestore(shost->host_lock, flags);
	} else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
		if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
		   time_after(jiffies, pnode->last_change_time +
			      msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
			spin_lock_irqsave(shost->host_lock, flags);
			pnode->cmd_qdepth += pnode->cmd_qdepth *
				LPFC_TGTQ_RAMPUP_PCENT / 100;
			if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
				pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
			pnode->last_change_time = jiffies;
			spin_unlock_irqrestore(shost->host_lock, flags);
		}
	}

	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);

	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
	queue_depth = cmd->device->queue_depth;
	scsi_id = cmd->device->id;
	cmd->scsi_done(cmd);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		/*
		 * If there is a thread waiting for command completion
		 * wake up the thread.
		 */
		spin_lock_irqsave(shost->host_lock, flags);
		lpfc_cmd->pCmd = NULL;
		if (lpfc_cmd->waitq)
			wake_up(lpfc_cmd->waitq);
		spin_unlock_irqrestore(shost->host_lock, flags);
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return;
	}

	if (!result)
		lpfc_rampup_queue_depth(vport, queue_depth);

	/*
	 * Check for queue full.  If the lun is reporting queue full, then
	 * back off the lun queue depth to prevent target overloads.
	 */
	if (result == SAM_STAT_TASK_SET_FULL && pnode &&
	    NLP_CHK_NODE_ACT(pnode)) {
		shost_for_each_device(tmp_sdev, shost) {
			if (tmp_sdev->id != scsi_id)
				continue;
			depth = scsi_track_queue_full(tmp_sdev,
						      tmp_sdev->queue_depth-1);
			if (depth <= 0)
				continue;
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
					 "0711 detected queue full - lun queue "
					 "depth adjusted to %d.\n", depth);
			lpfc_send_sdev_queuedepth_change_event(phba, vport,
							       pnode,
							       tmp_sdev->lun,
							       depth+1, depth);
		}
	}

	/*
	 * If there is a thread waiting for command completion
	 * wake up the thread.
	 */
	spin_lock_irqsave(shost->host_lock, flags);
	lpfc_cmd->pCmd = NULL;
	if (lpfc_cmd->waitq)
		wake_up(lpfc_cmd->waitq);
	spin_unlock_irqrestore(shost->host_lock, flags);

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}
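/*
 * Example of the target queue-depth ramp-up in the completion path above:
 * assuming LPFC_TGTQ_RAMPUP_PCENT is 5, a node with cmd_qdepth == 40 grows
 * by 40 * 5 / 100 == 2 per LPFC_TGTQ_INTERVAL of error-free completions,
 * and the depth is clamped at LPFC_MAX_TGT_QDEPTH.
 */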
/**
 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
 * @data: A pointer to the immediate command data portion of the IOCB.
 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
 *
 * The routine copies the entire FCP command from @fcp_cmnd to @data while
 * byte swapping the data to big endian format for transmission on the wire.
 **/
static void
lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
{
	int i, j;
	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
	     i += sizeof(uint32_t), j++) {
		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
	}
}
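/*
 * On a big-endian host the cpu_to_be32() above is a no-op, so the copy is
 * byte-for-byte; on a little-endian host each 32-bit word is swapped so the
 * FCP_CMND appears in wire (big-endian) order inside the IOCB.
 */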
/**
 * lpfc_scsi_prep_cmnd - Wrapper func for converting scsi cmnd to FCP info unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: The scsi command which needs to send.
 * @pnode: Pointer to lpfc_nodelist.
 *
 * This routine initializes fcp_cmnd and iocb data structure from scsi command
 * to transfer for device with SLI3 interface spec.
 **/
static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct lpfc_hba *phba = vport->phba;
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;
	char tag[2];

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
			&lpfc_cmd->fcp_cmnd->fcp_lun);

	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);

	if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = ORDERED_Q;
			break;
		default:
			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
			break;
		}
	} else
		fcp_cmnd->fcpCntl1 = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			if (phba->sli_rev < LPFC_SLI_REV4) {
				iocb_cmd->un.fcpi.fcpi_parm = 0;
				iocb_cmd->ulpPU = 0;
			} else
				iocb_cmd->ulpPU = PARM_READ_CHECK;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		phba->fc4ControlRequests++;
	}
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
		lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer.
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;
	else
		piocbq->iocb.ulpFCP2Rcvy = 0;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1 = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
	piocbq->vport = vport;
}
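/*
 * The tag message from the midlayer maps directly onto FCP task attributes
 * above: HEAD_OF_QUEUE_TAG -> HEAD_OF_Q, ORDERED_QUEUE_TAG -> ORDERED_Q,
 * and anything else (typically a simple tag message) falls back to SIMPLE_Q.
 */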
/**
 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 * @lun: Logical unit number.
 * @task_mgmt_cmd: SCSI task management command.
 *
 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
 * for device with SLI-3 interface spec.
 *
 * Return codes:
 *	0 - Error
 *	1 - Success
 **/
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     unsigned int lun,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ndlp->nlp_state != NLP_STE_MAPPED_NODE)
		return 0;

	piocbq = &(lpfc_cmd->cur_iocbq);
	piocbq->vport = vport;

	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	/* Clear out any old data in the FCP command area */
	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
	if (vport->phba->sli_rev == 3 &&
	    !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
	piocb->ulpContext = ndlp->nlp_rpi;
	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		piocb->ulpFCP2Rcvy = 1;
	}
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else
		piocb->ulpTimeout = lpfc_cmd->timeout;

	if (vport->phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);

	return 1;
}
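/*
 * Note the inverted return convention relative to the prep_dma_buf routines
 * above: here 0 means failure (node not present or not mapped) and 1 means
 * the task management IOCB was built successfully.
 */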
/**
 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the SCSI interface API function jump table in @phba
 * struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{

	phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
	phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
	phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;

	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1418 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
	phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	return 0;
}
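/*
 * Once this table is filled in, generic wrappers such as
 * lpfc_scsi_prep_dma_buf() above simply call through the pointer:
 * phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd) resolves to the _s3 variant
 * on LPFC_PCI_DEV_LP adapters and the _s4 variant on LPFC_PCI_DEV_OC.
 */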
/**
 * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
 * @phba: The Hba for which this call is being executed.
 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
 * @rspiocbq: Pointer to lpfc_iocbq data structure.
 *
 * This routine is the IOCB completion routine for device reset and target
 * reset.  It releases the scsi buffer associated with lpfc_cmd.
 **/
static void
lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) cmdiocbq->context1;
	if (lpfc_cmd)
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	return;
}
/**
 * lpfc_info - Info entry point of scsi_host_template data structure
 * @host: The scsi host for which this call is being executed.
 *
 * This routine provides module information about hba.
 *
 * Return code:
 *	Pointer to char - Success.
 **/
const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int len;
	static char lpfcinfobuf[384];

	memset(lpfcinfobuf, 0, 384);
	if (phba && phba->pcidev) {
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			 384-len,
			 " on PCI bus %02x device %02x irq %d",
			 phba->pcidev->bus->number,
			 phba->pcidev->devfn,
			 phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384-len,
				 " port %s",
				 phba->Port);
		}
		len = strlen(lpfcinfobuf);
		if (phba->sli4_hba.link_state.logical_speed) {
			snprintf(lpfcinfobuf + len,
				 384-len,
				 " Logical Link Speed: %d Mbps",
				 phba->sli4_hba.link_state.logical_speed * 10);
		}
	}
	return lpfcinfobuf;
}

/**
 * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba
 * @phba: The Hba for which this call is being executed.
 *
 * This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo.
 * The default value of cfg_poll_tmo is 10 milliseconds.
 **/
static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba *phba)
{
	unsigned long poll_tmo_expires =
		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

	if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
		mod_timer(&phba->fcp_poll_timer,
			  poll_tmo_expires);
}
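/*
 * The timer is only rearmed while the FCP ring still has uncompleted
 * commands (txcmplq_cnt != 0); once the queue drains, polling naturally
 * stops until lpfc_poll_start_timer() or the queuecommand path kicks it
 * off again.
 */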
/**
 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
 * @phba: The Hba for which this call is being executed.
 *
 * This routine starts the fcp_poll_timer of @phba.
 **/
void lpfc_poll_start_timer(struct lpfc_hba *phba)
{
	lpfc_poll_rearm_timer(phba);
}

/**
 * lpfc_poll_timeout - Restart polling timer
 * @ptr: Map to lpfc_hba data structure pointer.
 *
 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
 * and the FCP Ring interrupt is disabled.
 **/

void lpfc_poll_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);

		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}
}
/**
 * lpfc_queuecommand - scsi_host_template queuecommand entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 * @done: Pointer to done routine.
 *
 * Driver registers this routine to scsi midlayer to submit a @cmd to process.
 * This routine prepares an IOCB from scsi command and provides to firmware.
 * The @done callback is invoked after driver finished processing the command.
 *
 * Return value :
 *	0 - Success
 *	SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host
 *	temporarily.
 **/
static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err;

	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}
	ndlp = rdata->pnode;

	if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {

		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
				" op:%02x str=%s without registering for"
				" BlockGuard - Rejecting command\n",
				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
				dif_op_str[scsi_get_prot_op(cmnd)]);
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
		goto out_fail_command;
	}
	if (vport->cfg_max_scsicmpl_time &&
	    (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth))
		goto out_host_busy;

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL) {
		lpfc_rampdown_queue_depth(phba);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0707 driver's buffer pool is empty, "
				 "IO busied\n");
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	lpfc_cmd->start_time = jiffies;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
	cmnd->scsi_done = done;

	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
				"9033 BLKGRD: rcvd protected cmd:%02x op:%02x "
				"str=%s\n",
				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
				dif_op_str[scsi_get_prot_op(cmnd)]);
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
				"9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
				"%02x %02x %02x %02x %02x\n",
				cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
				cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
				cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
				cmnd->cmnd[9]);
			if (cmnd->cmnd[0] == READ_10)
				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
					"9035 BLKGRD: READ @ sector %llu, "
					"count %u\n",
					(unsigned long long)scsi_get_lba(cmnd),
					blk_rq_sectors(cmnd->request));
			else if (cmnd->cmnd[0] == WRITE_10)
				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
					"9036 BLKGRD: WRITE @ sector %llu, "
					"count %u cmd=%p\n",
					(unsigned long long)scsi_get_lba(cmnd),
					blk_rq_sectors(cmnd->request),
					cmnd);
		}

		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
	} else {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
					"9038 BLKGRD: rcvd unprotected cmd:"
					"%02x op:%02x str=%s\n",
					cmnd->cmnd[0], scsi_get_prot_op(cmnd),
					dif_op_str[scsi_get_prot_op(cmnd)]);
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
					"9039 BLKGRD: CDB: %02x %02x %02x "
					"%02x %02x %02x %02x %02x %02x %02x\n",
					cmnd->cmnd[0], cmnd->cmnd[1],
					cmnd->cmnd[2], cmnd->cmnd[3],
					cmnd->cmnd[4], cmnd->cmnd[5],
					cmnd->cmnd[6], cmnd->cmnd[7],
					cmnd->cmnd[8], cmnd->cmnd[9]);
			if (cmnd->cmnd[0] == READ_10)
				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
					"9040 dbg: READ @ sector %llu, "
					"count %u\n",
					(unsigned long long)scsi_get_lba(cmnd),
					blk_rq_sectors(cmnd->request));
			else if (cmnd->cmnd[0] == WRITE_10)
				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
					"9041 dbg: WRITE @ sector %llu, "
					"count %u cmd=%p\n",
					(unsigned long long)scsi_get_lba(cmnd),
					blk_rq_sectors(cmnd->request), cmnd);
			else
				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
					"9042 dbg: parser not implemented\n");
		}
		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	}

	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);

	atomic_inc(&ndlp->cmd_pending);
	err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err) {
		atomic_dec(&ndlp->cmd_pending);
		goto out_host_busy_free_buf;
	}
	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		spin_unlock(shost->host_lock);
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);

		spin_lock(shost->host_lock);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;

 out_host_busy_free_buf:
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_fail_command:
	done(cmnd);
	return 0;
}
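/*
 * The three exits above map onto the midlayer contract: out_fail_command
 * completes the command immediately via done(), out_host_busy returns
 * SCSI_MLQUEUE_HOST_BUSY so the midlayer retries later, and the free_buf
 * variant additionally unmaps and returns the lpfc_scsi_buf first.
 */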
/**
 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine aborts @cmnd pending in base driver.
 *
 * Return code :
 *	0x2003 - Error
 *	0x2002 - Success
 **/
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	int ret = SUCCESS;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	fc_block_scsi_eh(cmnd);
	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	BUG_ON(!lpfc_cmd);

	/*
	 * If pCmd field of the corresponding lpfc_scsi_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, but the midlayer did not
	 * see the completion before the eh fired.  Just return SUCCESS.
	 */
	iocb = &lpfc_cmd->cur_iocbq;
	if (lpfc_cmd->pCmd != cmnd)
		goto out;

	BUG_ON(iocb->context1 != lpfc_cmd);

	abtsiocb = lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		goto out;
	}

	/*
	 * The scsi command cannot be in the txq, and it is in flight because
	 * pCmd is still pointing at the SCSI command we have to abort.  There
	 * is no need to search the txcmplq.  Just send an abort to the FW.
	 */
	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	if (phba->sli_rev == LPFC_SLI_REV4)
		icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
	else
		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
	abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;

	if (lpfc_is_link_up(phba))
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	abtsiocb->vport = vport;
	if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
	    IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);

	lpfc_cmd->waitq = &waitq;
	/* Wait for abort to complete */
	wait_event_timeout(waitq,
			   (lpfc_cmd->pCmd != cmnd),
			   (2*vport->cfg_devloss_tmo*HZ));

	spin_lock_irq(shost->host_lock);
	lpfc_cmd->waitq = NULL;
	spin_unlock_irq(shost->host_lock);

	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0748 abort handler timed out waiting "
				 "for abort to complete: ret %#x, ID %d, "
				 "LUN %d, snum %#lx\n",
				 ret, cmnd->device->id, cmnd->device->lun,
				 cmnd->serial_number);
	}

 out:
	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
			 "LUN %d snum %#lx\n", ret, cmnd->device->id,
			 cmnd->device->lun, cmnd->serial_number);
	return ret;
}
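/*
 * The wait above is bounded by twice the devloss timeout; with the default
 * devloss_tmo of 30 seconds the handler gives the abort up to 60 seconds to
 * complete before reporting FAILED back to the SCSI error handler.
 */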
static char *
lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
{
	switch (task_mgmt_cmd) {
	case FCP_ABORT_TASK_SET:
		return "ABORT_TASK_SET";
	case FCP_CLEAR_TASK_SET:
		return "FCP_CLEAR_TASK_SET";
	case FCP_BUS_RESET:
		return "FCP_BUS_RESET";
	case FCP_LUN_RESET:
		return "FCP_LUN_RESET";
	case FCP_TARGET_RESET:
		return "FCP_TARGET_RESET";
	case FCP_CLEAR_ACA:
		return "FCP_CLEAR_ACA";
	case FCP_TERMINATE_TASK:
		return "FCP_TERMINATE_TASK";
	default:
		return "unknown";
	}
}

/**
 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
 * @vport: The virtual port for which this call is being executed.
 * @rdata: Pointer to remote port local data
 * @tgt_id: Target ID of remote device.
 * @lun_id: Lun number for the TMF
 * @task_mgmt_cmd: type of TMF to send
 *
 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
 * a remote port.
 *
 * Return Code:
 *	0x2003 - Error
 *	0x2002 - Success.
 **/
static int
lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
		   unsigned tgt_id, unsigned int lun_id,
		   uint8_t task_mgmt_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	int ret;
	int status;

	if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
		return FAILED;

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		return FAILED;
	lpfc_cmd->timeout = 60;
	lpfc_cmd->rdata = rdata;

	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
					      task_mgmt_cmd);
	if (!status) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}

	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0702 Issue %s to TGT %d LUN %d "
			 "rpi x%x nlp_flag x%x\n",
			 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
			 rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);

	status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
					  iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (status != IOCB_SUCCESS) {
		if (status == IOCB_TIMEDOUT) {
			iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
			ret = TIMEOUT_ERROR;
		} else
			ret = FAILED;
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0727 TMF %s to TGT %d LUN %d failed (%d, %d)\n",
			 lpfc_taskmgmt_name(task_mgmt_cmd),
			 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
			 iocbqrsp->iocb.un.ulpWord[4]);
	} else
		ret = SUCCESS;

	lpfc_sli_release_iocbq(phba, iocbqrsp);

	if (ret != TIMEOUT_ERROR)
		lpfc_release_scsi_buf(phba, lpfc_cmd);

	return ret;
}
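/*
 * On IOCB_TIMEDOUT the scsi buffer is deliberately not released here;
 * ownership passes to lpfc_tskmgmt_def_cmpl(), which frees it whenever the
 * firmware finally completes the orphaned TMF IOCB.
 */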
/**
 * lpfc_chk_tgt_mapped - Wait for the scsi target to become mapped
 * @vport: The virtual port to check on
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine delays until the scsi target (aka rport) for the
 * command exists (is present and logged in) or we declare it non-existent.
 *
 * Return code :
 *	0x2003 - Error
 *	0x2002 - Success
 **/
static int
lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
{
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode;
	unsigned long later;

	if (!rdata) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			"0797 Tgt Map rport failure: rdata x%p\n", rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	/*
	 * If target is not in a MAPPED state, delay until
	 * target is rediscovered or devloss timeout expires.
	 */
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies)) {
		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
			return FAILED;
		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
			return SUCCESS;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
		rdata = cmnd->device->hostdata;
		if (!rdata)
			return FAILED;
		pnode = rdata->pnode;
	}
	if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
	    (pnode->nlp_state != NLP_STE_MAPPED_NODE))
		return FAILED;
	return SUCCESS;
}
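/*
 * The loop above re-checks the node roughly every 500 ms; with a devloss
 * timeout of 30 seconds that allows up to ~120 polls (2 * 30 s / 0.5 s)
 * before the target is declared unreachable.
 */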
/**
 * lpfc_reset_flush_io_context - Flush I/O contexts after a reset TMF
 * @vport: The virtual port (scsi_host) for the flush context
 * @tgt_id: If aborting by Target context - specifies the target id
 * @lun_id: If aborting by Lun context - specifies the lun id
 * @context: specifies the context level to flush at.
 *
 * After a reset condition via TMF, we need to flush orphaned i/o
 * contexts from the adapter.  This routine aborts any contexts
 * outstanding, then waits for their completions.  The wait is
 * bounded by devloss_tmo though.
 *
 * Return code :
 *	0x2003 - Error
 *	0x2002 - Success
 **/
static int
lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
			uint64_t lun_id, lpfc_ctx_cmd context)
{
	struct lpfc_hba *phba = vport->phba;
	unsigned long later;
	int cnt;

	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
	if (cnt)
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    tgt_id, lun_id, context);
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies) && cnt) {
		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
	}
	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0724 I/O flush failure for context %s : cnt x%x\n",
			((context == LPFC_CTX_LUN) ? "LUN" :
			 ((context == LPFC_CTX_TGT) ? "TGT" :
			  ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
			cnt);
		return FAILED;
	}
	return SUCCESS;
}
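/*
 * The three context levels correspond to the reset handlers below:
 * lpfc_device_reset_handler() flushes at LPFC_CTX_LUN, the target reset
 * handler at LPFC_CTX_TGT, and the bus reset handler at LPFC_CTX_HOST.
 */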
/**
 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a device reset by sending a LUN_RESET task management
 * command.
 *
 * Return code :
 *	0x2003 - Error
 *	0x2002 - Success
 **/
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode;
	unsigned tgt_id = cmnd->device->id;
	unsigned int lun_id = cmnd->device->lun;
	struct lpfc_scsi_event_header scsi_event;
	int status;

	if (!rdata) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0798 Device Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	fc_block_scsi_eh(cmnd);

	status = lpfc_chk_tgt_mapped(vport, cmnd);
	if (status == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0721 Device Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_LUNRESET;
	scsi_event.lun = lun_id;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
						FCP_LUN_RESET);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0713 SCSI layer issued Device Reset (%d, %d) "
			 "return x%x\n", tgt_id, lun_id, status);

	/*
	 * We have to clean up the I/O regardless: it may have been orphaned
	 * by the TMF, or, if the TMF failed, it may be in an indeterminate
	 * state.  So, continue on.  We will report success if all the i/o
	 * aborts successfully.
	 */
	status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
						LPFC_CTX_LUN);
	return status;
}

/**
 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine issues a target reset to every target on @cmnd->device->host,
 * emulating Parallel SCSI Bus Reset semantics.
 *
 * Return code:
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_nodelist *ndlp = NULL;
	struct lpfc_scsi_event_header scsi_event;
	int match;
	int ret = SUCCESS, status, i;

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	fc_block_scsi_eh(cmnd);

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for a mapped node by target ID */
		match = 0;
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    ndlp->nlp_sid == i &&
			    ndlp->rport) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;

		status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
					    i, 0, FCP_TARGET_RESET);

		if (status != SUCCESS) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0700 Bus Reset on target %d failed\n",
					 i);
			ret = FAILED;
		}
	}
	/*
	 * We have to clean up the I/O here because it may have been
	 * orphaned by the TMFs above, or, if any of the TMFs failed, it
	 * may be left in an indeterminate state.
	 * Success is reported only if all the I/O aborts cleanly.
	 */
	status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
	if (status != SUCCESS)
		ret = FAILED;

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
	return ret;
}
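
/*
 * Note on flush scopes (restating the calls above): the three reset
 * handlers invoke lpfc_reset_flush_io_context() with progressively
 * wider contexts: LPFC_CTX_LUN for a single LUN, LPFC_CTX_TGT for one
 * target, and LPFC_CTX_HOST for every outstanding FCP command on the
 * host, mirroring the widening reset semantics.
 */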
" 3592 "Reducing allocation request to %d.\n", 3593 num_to_alloc, phba->cfg_hba_queue_depth, 3594 (phba->cfg_hba_queue_depth - total)); 3595 num_to_alloc = phba->cfg_hba_queue_depth - total; 3596 } 3597 num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc); 3598 if (num_to_alloc != num_allocated) { 3599 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 3600 "0708 Allocation request of %d " 3601 "command buffers did not succeed. " 3602 "Allocated %d buffers.\n", 3603 num_to_alloc, num_allocated); 3604 } 3605 if (num_allocated > 0) 3606 phba->total_scsi_bufs += num_allocated; 3607 return 0; 3608 } 3609 3610 /** 3611 * lpfc_slave_configure - scsi_host_template slave_configure entry point 3612 * @sdev: Pointer to scsi_device. 3613 * 3614 * This routine configures following items 3615 * - Tag command queuing support for @sdev if supported. 3616 * - Dev loss time out value of fc_rport. 3617 * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set. 3618 * 3619 * Return codes: 3620 * 0 - Success 3621 **/ 3622 static int 3623 lpfc_slave_configure(struct scsi_device *sdev) 3624 { 3625 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 3626 struct lpfc_hba *phba = vport->phba; 3627 struct fc_rport *rport = starget_to_rport(sdev->sdev_target); 3628 3629 if (sdev->tagged_supported) 3630 scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth); 3631 else 3632 scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth); 3633 3634 /* 3635 * Initialize the fc transport attributes for the target 3636 * containing this scsi device. Also note that the driver's 3637 * target pointer is stored in the starget_data for the 3638 * driver's sysfs entry point functions. 3639 */ 3640 rport->dev_loss_tmo = vport->cfg_devloss_tmo; 3641 3642 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 3643 lpfc_sli_handle_fast_ring_event(phba, 3644 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); 3645 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 3646 lpfc_poll_rearm_timer(phba); 3647 } 3648 3649 return 0; 3650 } 3651 3652 /** 3653 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure 3654 * @sdev: Pointer to scsi_device. 3655 * 3656 * This routine sets @sdev hostatdata filed to null. 

/**
 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
 * @sdev: Pointer to scsi_device.
 *
 * This routine sets the @sdev hostdata field to NULL.
 **/
static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
	return;
}


struct scsi_host_template lpfc_template = {
	.module = THIS_MODULE,
	.name = LPFC_DRIVER_NAME,
	.info = lpfc_info,
	.queuecommand = lpfc_queuecommand,
	.eh_abort_handler = lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler = lpfc_bus_reset_handler,
	.slave_alloc = lpfc_slave_alloc,
	.slave_configure = lpfc_slave_configure,
	.slave_destroy = lpfc_slave_destroy,
	.scan_finished = lpfc_scan_finished,
	.this_id = -1,
	.sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun = LPFC_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = lpfc_hba_attrs,
	.max_sectors = 0xFFFF,
	.vendor_id = LPFC_NL_VENDOR_ID,
	.change_queue_depth = lpfc_change_queue_depth,
};

struct scsi_host_template lpfc_vport_template = {
	.module = THIS_MODULE,
	.name = LPFC_DRIVER_NAME,
	.info = lpfc_info,
	.queuecommand = lpfc_queuecommand,
	.eh_abort_handler = lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler = lpfc_bus_reset_handler,
	.slave_alloc = lpfc_slave_alloc,
	.slave_configure = lpfc_slave_configure,
	.slave_destroy = lpfc_slave_destroy,
	.scan_finished = lpfc_scan_finished,
	.this_id = -1,
	.sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun = LPFC_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = lpfc_vport_attrs,
	.max_sectors = 0xFFFF,
	.change_queue_depth = lpfc_change_queue_depth,
};
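
/*
 * Usage sketch (hedged; the authoritative call site is in lpfc_init.c):
 * the physical port and its vports share every entry point and differ
 * only in the sysfs attribute table (lpfc_hba_attrs vs lpfc_vport_attrs)
 * and in the vendor_id carried by the physical-port template.  Port
 * creation picks between them roughly like this:
 *
 *	shost = scsi_host_alloc(physical_port ? &lpfc_template
 *					      : &lpfc_vport_template,
 *				sizeof(struct lpfc_vport));
 *
 * where "physical_port" is a stand-in for the driver's actual test of
 * whether the port being created is the PCI function itself.
 */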