1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 8 * * 9 * This program is free software; you can redistribute it and/or * 10 * modify it under the terms of version 2 of the GNU General * 11 * Public License as published by the Free Software Foundation. * 12 * This program is distributed in the hope that it will be useful. * 13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 17 * TO BE LEGALLY INVALID. See the GNU General Public License for * 18 * more details, a copy of which can be found in the file COPYING * 19 * included with this package. * 20 *******************************************************************/ 21 #include <linux/pci.h> 22 #include <linux/slab.h> 23 #include <linux/interrupt.h> 24 #include <linux/delay.h> 25 #include <asm/unaligned.h> 26 27 #include <scsi/scsi.h> 28 #include <scsi/scsi_device.h> 29 #include <scsi/scsi_eh.h> 30 #include <scsi/scsi_host.h> 31 #include <scsi/scsi_tcq.h> 32 #include <scsi/scsi_transport_fc.h> 33 34 #include "lpfc_version.h" 35 #include "lpfc_hw4.h" 36 #include "lpfc_hw.h" 37 #include "lpfc_sli.h" 38 #include "lpfc_sli4.h" 39 #include "lpfc_nl.h" 40 #include "lpfc_disc.h" 41 #include "lpfc_scsi.h" 42 #include "lpfc.h" 43 #include "lpfc_logmsg.h" 44 #include "lpfc_crtn.h" 45 #include "lpfc_vport.h" 46 47 #define LPFC_RESET_WAIT 2 48 #define LPFC_ABORT_WAIT 2 49 50 int _dump_buf_done; 51 52 static char *dif_op_str[] = { 53 "SCSI_PROT_NORMAL", 54 "SCSI_PROT_READ_INSERT", 55 "SCSI_PROT_WRITE_STRIP", 56 "SCSI_PROT_READ_STRIP", 57 "SCSI_PROT_WRITE_INSERT", 58 "SCSI_PROT_READ_PASS", 59 "SCSI_PROT_WRITE_PASS", 60 }; 61 static void 62 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); 63 static void 64 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); 65 66 static void 67 lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd) 68 { 69 void *src, *dst; 70 struct scatterlist *sgde = scsi_sglist(cmnd); 71 72 if (!_dump_buf_data) { 73 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 74 "9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n", 75 __func__); 76 return; 77 } 78 79 80 if (!sgde) { 81 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 82 "9051 BLKGRD: ERROR: data scatterlist is null\n"); 83 return; 84 } 85 86 dst = (void *) _dump_buf_data; 87 while (sgde) { 88 src = sg_virt(sgde); 89 memcpy(dst, src, sgde->length); 90 dst += sgde->length; 91 sgde = sg_next(sgde); 92 } 93 } 94 95 static void 96 lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd) 97 { 98 void *src, *dst; 99 struct scatterlist *sgde = scsi_prot_sglist(cmnd); 100 101 if (!_dump_buf_dif) { 102 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 103 "9052 BLKGRD: ERROR %s _dump_buf_data is NULL\n", 104 __func__); 105 return; 106 } 107 108 if (!sgde) { 109 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 110 "9053 BLKGRD: ERROR: prot scatterlist is null\n"); 111 return; 112 } 113 114 dst = _dump_buf_dif; 115 while (sgde) { 116 src = sg_virt(sgde); 117 memcpy(dst, src, sgde->length); 118 dst += sgde->length; 119 sgde = sg_next(sgde); 120 
} 121 } 122 123 /** 124 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge. 125 * @phba: Pointer to HBA object. 126 * @lpfc_cmd: lpfc scsi command object pointer. 127 * 128 * This function is called from the lpfc_prep_task_mgmt_cmd function to 129 * set the last bit in the response sge entry. 130 **/ 131 static void 132 lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba, 133 struct lpfc_scsi_buf *lpfc_cmd) 134 { 135 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; 136 if (sgl) { 137 sgl += 1; 138 sgl->word2 = le32_to_cpu(sgl->word2); 139 bf_set(lpfc_sli4_sge_last, sgl, 1); 140 sgl->word2 = cpu_to_le32(sgl->word2); 141 } 142 } 143 144 /** 145 * lpfc_update_stats - Update statistical data for the command completion 146 * @phba: Pointer to HBA object. 147 * @lpfc_cmd: lpfc scsi command object pointer. 148 * 149 * This function is called when there is a command completion and this 150 * function updates the statistical data for the command completion. 151 **/ 152 static void 153 lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) 154 { 155 struct lpfc_rport_data *rdata = lpfc_cmd->rdata; 156 struct lpfc_nodelist *pnode = rdata->pnode; 157 struct scsi_cmnd *cmd = lpfc_cmd->pCmd; 158 unsigned long flags; 159 struct Scsi_Host *shost = cmd->device->host; 160 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 161 unsigned long latency; 162 int i; 163 164 if (cmd->result) 165 return; 166 167 latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time); 168 169 spin_lock_irqsave(shost->host_lock, flags); 170 if (!vport->stat_data_enabled || 171 vport->stat_data_blocked || 172 !pnode || 173 !pnode->lat_data || 174 (phba->bucket_type == LPFC_NO_BUCKET)) { 175 spin_unlock_irqrestore(shost->host_lock, flags); 176 return; 177 } 178 179 if (phba->bucket_type == LPFC_LINEAR_BUCKET) { 180 i = (latency + phba->bucket_step - 1 - phba->bucket_base)/ 181 phba->bucket_step; 182 /* check array subscript bounds */ 183 if (i < 0) 184 i = 0; 185 else if (i >= LPFC_MAX_BUCKET_COUNT) 186 i = LPFC_MAX_BUCKET_COUNT - 1; 187 } else { 188 for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++) 189 if (latency <= (phba->bucket_base + 190 ((1<<i)*phba->bucket_step))) 191 break; 192 } 193 194 pnode->lat_data[i].cmd_count++; 195 spin_unlock_irqrestore(shost->host_lock, flags); 196 } 197 198 /** 199 * lpfc_send_sdev_queuedepth_change_event - Posts a queuedepth change event 200 * @phba: Pointer to HBA context object. 201 * @vport: Pointer to vport object. 202 * @ndlp: Pointer to FC node associated with the target. 203 * @lun: Lun number of the scsi device. 204 * @old_val: Old value of the queue depth. 205 * @new_val: New value of the queue depth. 206 * 207 * This function sends an event to the mgmt application indicating 208 * there is a change in the scsi device queue depth. 
209 **/ 210 static void 211 lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba, 212 struct lpfc_vport *vport, 213 struct lpfc_nodelist *ndlp, 214 uint32_t lun, 215 uint32_t old_val, 216 uint32_t new_val) 217 { 218 struct lpfc_fast_path_event *fast_path_evt; 219 unsigned long flags; 220 221 fast_path_evt = lpfc_alloc_fast_evt(phba); 222 if (!fast_path_evt) 223 return; 224 225 fast_path_evt->un.queue_depth_evt.scsi_event.event_type = 226 FC_REG_SCSI_EVENT; 227 fast_path_evt->un.queue_depth_evt.scsi_event.subcategory = 228 LPFC_EVENT_VARQUEDEPTH; 229 230 /* Report all luns with change in queue depth */ 231 fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun; 232 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { 233 memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn, 234 &ndlp->nlp_portname, sizeof(struct lpfc_name)); 235 memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn, 236 &ndlp->nlp_nodename, sizeof(struct lpfc_name)); 237 } 238 239 fast_path_evt->un.queue_depth_evt.oldval = old_val; 240 fast_path_evt->un.queue_depth_evt.newval = new_val; 241 fast_path_evt->vport = vport; 242 243 fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT; 244 spin_lock_irqsave(&phba->hbalock, flags); 245 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list); 246 spin_unlock_irqrestore(&phba->hbalock, flags); 247 lpfc_worker_wake_up(phba); 248 249 return; 250 } 251 252 /** 253 * lpfc_change_queue_depth - Alter scsi device queue depth 254 * @sdev: Pointer the scsi device on which to change the queue depth. 255 * @qdepth: New queue depth to set the sdev to. 256 * @reason: The reason for the queue depth change. 257 * 258 * This function is called by the midlayer and the LLD to alter the queue 259 * depth for a scsi device. This function sets the queue depth to the new 260 * value and sends an event out to log the queue depth change. 261 **/ 262 int 263 lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) 264 { 265 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 266 struct lpfc_hba *phba = vport->phba; 267 struct lpfc_rport_data *rdata; 268 unsigned long new_queue_depth, old_queue_depth; 269 270 old_queue_depth = sdev->queue_depth; 271 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); 272 new_queue_depth = sdev->queue_depth; 273 rdata = sdev->hostdata; 274 if (rdata) 275 lpfc_send_sdev_queuedepth_change_event(phba, vport, 276 rdata->pnode, sdev->lun, 277 old_queue_depth, 278 new_queue_depth); 279 return sdev->queue_depth; 280 } 281 282 /** 283 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread 284 * @phba: The Hba for which this call is being executed. 285 * 286 * This routine is called when there is resource error in driver or firmware. 287 * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine 288 * posts at most 1 event each second. This routine wakes up worker thread of 289 * @phba to process WORKER_RAM_DOWN_EVENT event. 290 * 291 * This routine should be called with no lock held. 
292 **/ 293 void 294 lpfc_rampdown_queue_depth(struct lpfc_hba *phba) 295 { 296 unsigned long flags; 297 uint32_t evt_posted; 298 299 spin_lock_irqsave(&phba->hbalock, flags); 300 atomic_inc(&phba->num_rsrc_err); 301 phba->last_rsrc_error_time = jiffies; 302 303 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) { 304 spin_unlock_irqrestore(&phba->hbalock, flags); 305 return; 306 } 307 308 phba->last_ramp_down_time = jiffies; 309 310 spin_unlock_irqrestore(&phba->hbalock, flags); 311 312 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 313 evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE; 314 if (!evt_posted) 315 phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE; 316 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); 317 318 if (!evt_posted) 319 lpfc_worker_wake_up(phba); 320 return; 321 } 322 323 /** 324 * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread 325 * @phba: The Hba for which this call is being executed. 326 * 327 * This routine post WORKER_RAMP_UP_QUEUE event for @phba vport. This routine 328 * post at most 1 event every 5 minute after last_ramp_up_time or 329 * last_rsrc_error_time. This routine wakes up worker thread of @phba 330 * to process WORKER_RAM_DOWN_EVENT event. 331 * 332 * This routine should be called with no lock held. 333 **/ 334 static inline void 335 lpfc_rampup_queue_depth(struct lpfc_vport *vport, 336 uint32_t queue_depth) 337 { 338 unsigned long flags; 339 struct lpfc_hba *phba = vport->phba; 340 uint32_t evt_posted; 341 atomic_inc(&phba->num_cmd_success); 342 343 if (vport->cfg_lun_queue_depth <= queue_depth) 344 return; 345 spin_lock_irqsave(&phba->hbalock, flags); 346 if (time_before(jiffies, 347 phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) || 348 time_before(jiffies, 349 phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) { 350 spin_unlock_irqrestore(&phba->hbalock, flags); 351 return; 352 } 353 phba->last_ramp_up_time = jiffies; 354 spin_unlock_irqrestore(&phba->hbalock, flags); 355 356 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 357 evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE; 358 if (!evt_posted) 359 phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE; 360 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); 361 362 if (!evt_posted) 363 lpfc_worker_wake_up(phba); 364 return; 365 } 366 367 /** 368 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler 369 * @phba: The Hba for which this call is being executed. 370 * 371 * This routine is called to process WORKER_RAMP_DOWN_QUEUE event for worker 372 * thread.This routine reduces queue depth for all scsi device on each vport 373 * associated with @phba. 
374 **/ 375 void 376 lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) 377 { 378 struct lpfc_vport **vports; 379 struct Scsi_Host *shost; 380 struct scsi_device *sdev; 381 unsigned long new_queue_depth; 382 unsigned long num_rsrc_err, num_cmd_success; 383 int i; 384 385 num_rsrc_err = atomic_read(&phba->num_rsrc_err); 386 num_cmd_success = atomic_read(&phba->num_cmd_success); 387 388 vports = lpfc_create_vport_work_array(phba); 389 if (vports != NULL) 390 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 391 shost = lpfc_shost_from_vport(vports[i]); 392 shost_for_each_device(sdev, shost) { 393 new_queue_depth = 394 sdev->queue_depth * num_rsrc_err / 395 (num_rsrc_err + num_cmd_success); 396 if (!new_queue_depth) 397 new_queue_depth = sdev->queue_depth - 1; 398 else 399 new_queue_depth = sdev->queue_depth - 400 new_queue_depth; 401 lpfc_change_queue_depth(sdev, new_queue_depth, 402 SCSI_QDEPTH_DEFAULT); 403 } 404 } 405 lpfc_destroy_vport_work_array(phba, vports); 406 atomic_set(&phba->num_rsrc_err, 0); 407 atomic_set(&phba->num_cmd_success, 0); 408 } 409 410 /** 411 * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler 412 * @phba: The Hba for which this call is being executed. 413 * 414 * This routine is called to process WORKER_RAMP_UP_QUEUE event for worker 415 * thread.This routine increases queue depth for all scsi device on each vport 416 * associated with @phba by 1. This routine also sets @phba num_rsrc_err and 417 * num_cmd_success to zero. 418 **/ 419 void 420 lpfc_ramp_up_queue_handler(struct lpfc_hba *phba) 421 { 422 struct lpfc_vport **vports; 423 struct Scsi_Host *shost; 424 struct scsi_device *sdev; 425 int i; 426 427 vports = lpfc_create_vport_work_array(phba); 428 if (vports != NULL) 429 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 430 shost = lpfc_shost_from_vport(vports[i]); 431 shost_for_each_device(sdev, shost) { 432 if (vports[i]->cfg_lun_queue_depth <= 433 sdev->queue_depth) 434 continue; 435 lpfc_change_queue_depth(sdev, 436 sdev->queue_depth+1, 437 SCSI_QDEPTH_RAMP_UP); 438 } 439 } 440 lpfc_destroy_vport_work_array(phba, vports); 441 atomic_set(&phba->num_rsrc_err, 0); 442 atomic_set(&phba->num_cmd_success, 0); 443 } 444 445 /** 446 * lpfc_scsi_dev_block - set all scsi hosts to block state 447 * @phba: Pointer to HBA context object. 448 * 449 * This function walks vport list and set each SCSI host to block state 450 * by invoking fc_remote_port_delete() routine. This function is invoked 451 * with EEH when device's PCI slot has been permanently disabled. 452 **/ 453 void 454 lpfc_scsi_dev_block(struct lpfc_hba *phba) 455 { 456 struct lpfc_vport **vports; 457 struct Scsi_Host *shost; 458 struct scsi_device *sdev; 459 struct fc_rport *rport; 460 int i; 461 462 vports = lpfc_create_vport_work_array(phba); 463 if (vports != NULL) 464 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 465 shost = lpfc_shost_from_vport(vports[i]); 466 shost_for_each_device(sdev, shost) { 467 rport = starget_to_rport(scsi_target(sdev)); 468 fc_remote_port_delete(rport); 469 } 470 } 471 lpfc_destroy_vport_work_array(phba, vports); 472 } 473 474 /** 475 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec 476 * @vport: The virtual port for which this call being executed. 477 * @num_to_allocate: The requested number of buffers to allocate. 
478 * 479 * This routine allocates a scsi buffer for device with SLI-3 interface spec, 480 * the scsi buffer contains all the necessary information needed to initiate 481 * a SCSI I/O. The non-DMAable buffer region contains information to build 482 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP, 483 * and the initial BPL. In addition to allocating memory, the FCP CMND and 484 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB. 485 * 486 * Return codes: 487 * int - number of scsi buffers that were allocated. 488 * 0 = failure, less than num_to_alloc is a partial failure. 489 **/ 490 static int 491 lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) 492 { 493 struct lpfc_hba *phba = vport->phba; 494 struct lpfc_scsi_buf *psb; 495 struct ulp_bde64 *bpl; 496 IOCB_t *iocb; 497 dma_addr_t pdma_phys_fcp_cmd; 498 dma_addr_t pdma_phys_fcp_rsp; 499 dma_addr_t pdma_phys_bpl; 500 uint16_t iotag; 501 int bcnt; 502 503 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { 504 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); 505 if (!psb) 506 break; 507 508 /* 509 * Get memory from the pci pool to map the virt space to pci 510 * bus space for an I/O. The DMA buffer includes space for the 511 * struct fcp_cmnd, struct fcp_rsp and the number of bde's 512 * necessary to support the sg_tablesize. 513 */ 514 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, 515 GFP_KERNEL, &psb->dma_handle); 516 if (!psb->data) { 517 kfree(psb); 518 break; 519 } 520 521 /* Initialize virtual ptrs to dma_buf region. */ 522 memset(psb->data, 0, phba->cfg_sg_dma_buf_size); 523 524 /* Allocate iotag for psb->cur_iocbq. */ 525 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); 526 if (iotag == 0) { 527 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, 528 psb->data, psb->dma_handle); 529 kfree(psb); 530 break; 531 } 532 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; 533 534 psb->fcp_cmnd = psb->data; 535 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd); 536 psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) + 537 sizeof(struct fcp_rsp); 538 539 /* Initialize local short-hand pointers. */ 540 bpl = psb->fcp_bpl; 541 pdma_phys_fcp_cmd = psb->dma_handle; 542 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd); 543 pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) + 544 sizeof(struct fcp_rsp); 545 546 /* 547 * The first two bdes are the FCP_CMD and FCP_RSP. The balance 548 * are sg list bdes. Initialize the first two and leave the 549 * rest for queuecommand. 550 */ 551 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd)); 552 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd)); 553 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd); 554 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64; 555 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w); 556 557 /* Setup the physical region for the FCP RSP */ 558 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp)); 559 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp)); 560 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp); 561 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64; 562 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w); 563 564 /* 565 * Since the IOCB for the FCP I/O is built into this 566 * lpfc_scsi_buf, initialize it with all known data now. 
567 */ 568 iocb = &psb->cur_iocbq.iocb; 569 iocb->un.fcpi64.bdl.ulpIoTag32 = 0; 570 if ((phba->sli_rev == 3) && 571 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { 572 /* fill in immediate fcp command BDE */ 573 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED; 574 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); 575 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t, 576 unsli3.fcp_ext.icd); 577 iocb->un.fcpi64.bdl.addrHigh = 0; 578 iocb->ulpBdeCount = 0; 579 iocb->ulpLe = 0; 580 /* fill in responce BDE */ 581 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = 582 BUFF_TYPE_BDE_64; 583 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize = 584 sizeof(struct fcp_rsp); 585 iocb->unsli3.fcp_ext.rbde.addrLow = 586 putPaddrLow(pdma_phys_fcp_rsp); 587 iocb->unsli3.fcp_ext.rbde.addrHigh = 588 putPaddrHigh(pdma_phys_fcp_rsp); 589 } else { 590 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64; 591 iocb->un.fcpi64.bdl.bdeSize = 592 (2 * sizeof(struct ulp_bde64)); 593 iocb->un.fcpi64.bdl.addrLow = 594 putPaddrLow(pdma_phys_bpl); 595 iocb->un.fcpi64.bdl.addrHigh = 596 putPaddrHigh(pdma_phys_bpl); 597 iocb->ulpBdeCount = 1; 598 iocb->ulpLe = 1; 599 } 600 iocb->ulpClass = CLASS3; 601 psb->status = IOSTAT_SUCCESS; 602 /* Put it back into the SCSI buffer list */ 603 psb->cur_iocbq.context1 = psb; 604 lpfc_release_scsi_buf_s3(phba, psb); 605 606 } 607 608 return bcnt; 609 } 610 611 /** 612 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort 613 * @phba: pointer to lpfc hba data structure. 614 * @axri: pointer to the fcp xri abort wcqe structure. 615 * 616 * This routine is invoked by the worker thread to process a SLI4 fast-path 617 * FCP aborted xri. 618 **/ 619 void 620 lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, 621 struct sli4_wcqe_xri_aborted *axri) 622 { 623 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 624 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); 625 struct lpfc_scsi_buf *psb, *next_psb; 626 unsigned long iflag = 0; 627 struct lpfc_iocbq *iocbq; 628 int i; 629 struct lpfc_nodelist *ndlp; 630 int rrq_empty = 0; 631 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 632 633 spin_lock_irqsave(&phba->hbalock, iflag); 634 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); 635 list_for_each_entry_safe(psb, next_psb, 636 &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) { 637 if (psb->cur_iocbq.sli4_xritag == xri) { 638 list_del(&psb->list); 639 psb->exch_busy = 0; 640 psb->status = IOSTAT_SUCCESS; 641 spin_unlock( 642 &phba->sli4_hba.abts_scsi_buf_list_lock); 643 ndlp = psb->rdata->pnode; 644 rrq_empty = list_empty(&phba->active_rrq_list); 645 spin_unlock_irqrestore(&phba->hbalock, iflag); 646 if (ndlp) 647 lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1); 648 lpfc_release_scsi_buf_s4(phba, psb); 649 if (rrq_empty) 650 lpfc_worker_wake_up(phba); 651 return; 652 } 653 } 654 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); 655 for (i = 1; i <= phba->sli.last_iotag; i++) { 656 iocbq = phba->sli.iocbq_lookup[i]; 657 658 if (!(iocbq->iocb_flag & LPFC_IO_FCP) || 659 (iocbq->iocb_flag & LPFC_IO_LIBDFC)) 660 continue; 661 if (iocbq->sli4_xritag != xri) 662 continue; 663 psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 664 psb->exch_busy = 0; 665 spin_unlock_irqrestore(&phba->hbalock, iflag); 666 if (pring->txq_cnt) 667 lpfc_worker_wake_up(phba); 668 return; 669 670 } 671 spin_unlock_irqrestore(&phba->hbalock, iflag); 672 } 673 674 /** 675 * lpfc_sli4_repost_scsi_sgl_list - Repsot the Scsi buffers sgl pages as block 676 * @phba: pointer to lpfc hba data structure. 
677 * 678 * This routine walks the list of scsi buffers that have been allocated and 679 * repost them to the HBA by using SGL block post. This is needed after a 680 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine 681 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list 682 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers. 683 * 684 * Returns: 0 = success, non-zero failure. 685 **/ 686 int 687 lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba) 688 { 689 struct lpfc_scsi_buf *psb; 690 int index, status, bcnt = 0, rcnt = 0, rc = 0; 691 LIST_HEAD(sblist); 692 693 for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) { 694 psb = phba->sli4_hba.lpfc_scsi_psb_array[index]; 695 if (psb) { 696 /* Remove from SCSI buffer list */ 697 list_del(&psb->list); 698 /* Add it to a local SCSI buffer list */ 699 list_add_tail(&psb->list, &sblist); 700 if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) { 701 bcnt = rcnt; 702 rcnt = 0; 703 } 704 } else 705 /* A hole present in the XRI array, need to skip */ 706 bcnt = rcnt; 707 708 if (index == phba->sli4_hba.scsi_xri_cnt - 1) 709 /* End of XRI array for SCSI buffer, complete */ 710 bcnt = rcnt; 711 712 /* Continue until collect up to a nembed page worth of sgls */ 713 if (bcnt == 0) 714 continue; 715 /* Now, post the SCSI buffer list sgls as a block */ 716 status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt); 717 /* Reset SCSI buffer count for next round of posting */ 718 bcnt = 0; 719 while (!list_empty(&sblist)) { 720 list_remove_head(&sblist, psb, struct lpfc_scsi_buf, 721 list); 722 if (status) { 723 /* Put this back on the abort scsi list */ 724 psb->exch_busy = 1; 725 rc++; 726 } else { 727 psb->exch_busy = 0; 728 psb->status = IOSTAT_SUCCESS; 729 } 730 /* Put it back into the SCSI buffer list */ 731 lpfc_release_scsi_buf_s4(phba, psb); 732 } 733 } 734 return rc; 735 } 736 737 /** 738 * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec 739 * @vport: The virtual port for which this call being executed. 740 * @num_to_allocate: The requested number of buffers to allocate. 741 * 742 * This routine allocates a scsi buffer for device with SLI-4 interface spec, 743 * the scsi buffer contains all the necessary information needed to initiate 744 * a SCSI I/O. 745 * 746 * Return codes: 747 * int - number of scsi buffers that were allocated. 748 * 0 = failure, less than num_to_alloc is a partial failure. 749 **/ 750 static int 751 lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) 752 { 753 struct lpfc_hba *phba = vport->phba; 754 struct lpfc_scsi_buf *psb; 755 struct sli4_sge *sgl; 756 IOCB_t *iocb; 757 dma_addr_t pdma_phys_fcp_cmd; 758 dma_addr_t pdma_phys_fcp_rsp; 759 dma_addr_t pdma_phys_bpl, pdma_phys_bpl1; 760 uint16_t iotag, last_xritag = NO_XRI; 761 int status = 0, index; 762 int bcnt; 763 int non_sequential_xri = 0; 764 LIST_HEAD(sblist); 765 766 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { 767 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); 768 if (!psb) 769 break; 770 771 /* 772 * Get memory from the pci pool to map the virt space to pci bus 773 * space for an I/O. The DMA buffer includes space for the 774 * struct fcp_cmnd, struct fcp_rsp and the number of bde's 775 * necessary to support the sg_tablesize. 776 */ 777 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, 778 GFP_KERNEL, &psb->dma_handle); 779 if (!psb->data) { 780 kfree(psb); 781 break; 782 } 783 784 /* Initialize virtual ptrs to dma_buf region. 
*/ 785 memset(psb->data, 0, phba->cfg_sg_dma_buf_size); 786 787 /* Allocate iotag for psb->cur_iocbq. */ 788 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); 789 if (iotag == 0) { 790 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, 791 psb->data, psb->dma_handle); 792 kfree(psb); 793 break; 794 } 795 796 psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba); 797 if (psb->cur_iocbq.sli4_xritag == NO_XRI) { 798 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, 799 psb->data, psb->dma_handle); 800 kfree(psb); 801 break; 802 } 803 if (last_xritag != NO_XRI 804 && psb->cur_iocbq.sli4_xritag != (last_xritag+1)) { 805 non_sequential_xri = 1; 806 } else 807 list_add_tail(&psb->list, &sblist); 808 last_xritag = psb->cur_iocbq.sli4_xritag; 809 810 index = phba->sli4_hba.scsi_xri_cnt++; 811 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; 812 813 psb->fcp_bpl = psb->data; 814 psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size) 815 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); 816 psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd + 817 sizeof(struct fcp_cmnd)); 818 819 /* Initialize local short-hand pointers. */ 820 sgl = (struct sli4_sge *)psb->fcp_bpl; 821 pdma_phys_bpl = psb->dma_handle; 822 pdma_phys_fcp_cmd = 823 (psb->dma_handle + phba->cfg_sg_dma_buf_size) 824 - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); 825 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd); 826 827 /* 828 * The first two bdes are the FCP_CMD and FCP_RSP. The balance 829 * are sg list bdes. Initialize the first two and leave the 830 * rest for queuecommand. 831 */ 832 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd)); 833 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd)); 834 bf_set(lpfc_sli4_sge_last, sgl, 0); 835 sgl->word2 = cpu_to_le32(sgl->word2); 836 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd)); 837 sgl++; 838 839 /* Setup the physical region for the FCP RSP */ 840 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp)); 841 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp)); 842 bf_set(lpfc_sli4_sge_last, sgl, 1); 843 sgl->word2 = cpu_to_le32(sgl->word2); 844 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp)); 845 846 /* 847 * Since the IOCB for the FCP I/O is built into this 848 * lpfc_scsi_buf, initialize it with all known data now. 849 */ 850 iocb = &psb->cur_iocbq.iocb; 851 iocb->un.fcpi64.bdl.ulpIoTag32 = 0; 852 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64; 853 /* setting the BLP size to 2 * sizeof BDE may not be correct. 854 * We are setting the bpl to point to out sgl. An sgl's 855 * entries are 16 bytes, a bpl entries are 12 bytes. 
856 */ 857 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); 858 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd); 859 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd); 860 iocb->ulpBdeCount = 1; 861 iocb->ulpLe = 1; 862 iocb->ulpClass = CLASS3; 863 psb->cur_iocbq.context1 = psb; 864 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 865 pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE; 866 else 867 pdma_phys_bpl1 = 0; 868 psb->dma_phys_bpl = pdma_phys_bpl; 869 phba->sli4_hba.lpfc_scsi_psb_array[index] = psb; 870 if (non_sequential_xri) { 871 status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl, 872 pdma_phys_bpl1, 873 psb->cur_iocbq.sli4_xritag); 874 if (status) { 875 /* Put this back on the abort scsi list */ 876 psb->exch_busy = 1; 877 } else { 878 psb->exch_busy = 0; 879 psb->status = IOSTAT_SUCCESS; 880 } 881 /* Put it back into the SCSI buffer list */ 882 lpfc_release_scsi_buf_s4(phba, psb); 883 break; 884 } 885 } 886 if (bcnt) { 887 status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt); 888 /* Reset SCSI buffer count for next round of posting */ 889 while (!list_empty(&sblist)) { 890 list_remove_head(&sblist, psb, struct lpfc_scsi_buf, 891 list); 892 if (status) { 893 /* Put this back on the abort scsi list */ 894 psb->exch_busy = 1; 895 } else { 896 psb->exch_busy = 0; 897 psb->status = IOSTAT_SUCCESS; 898 } 899 /* Put it back into the SCSI buffer list */ 900 lpfc_release_scsi_buf_s4(phba, psb); 901 } 902 } 903 904 return bcnt + non_sequential_xri; 905 } 906 907 /** 908 * lpfc_new_scsi_buf - Wrapper funciton for scsi buffer allocator 909 * @vport: The virtual port for which this call being executed. 910 * @num_to_allocate: The requested number of buffers to allocate. 911 * 912 * This routine wraps the actual SCSI buffer allocator function pointer from 913 * the lpfc_hba struct. 914 * 915 * Return codes: 916 * int - number of scsi buffers that were allocated. 917 * 0 = failure, less than num_to_alloc is a partial failure. 918 **/ 919 static inline int 920 lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc) 921 { 922 return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc); 923 } 924 925 /** 926 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA 927 * @phba: The HBA for which this call is being executed. 928 * 929 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list 930 * and returns to caller. 931 * 932 * Return codes: 933 * NULL - Error 934 * Pointer to lpfc_scsi_buf - Success 935 **/ 936 static struct lpfc_scsi_buf* 937 lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 938 { 939 struct lpfc_scsi_buf * lpfc_cmd = NULL; 940 struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list; 941 unsigned long iflag = 0; 942 943 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); 944 list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list); 945 if (lpfc_cmd) { 946 lpfc_cmd->seg_cnt = 0; 947 lpfc_cmd->nonsg_phys = 0; 948 lpfc_cmd->prot_seg_cnt = 0; 949 } 950 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); 951 return lpfc_cmd; 952 } 953 /** 954 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA 955 * @phba: The HBA for which this call is being executed. 956 * 957 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list 958 * and returns to caller. 
959 * 960 * Return codes: 961 * NULL - Error 962 * Pointer to lpfc_scsi_buf - Success 963 **/ 964 static struct lpfc_scsi_buf* 965 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 966 { 967 struct lpfc_scsi_buf *lpfc_cmd = NULL; 968 struct lpfc_scsi_buf *start_lpfc_cmd = NULL; 969 struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list; 970 unsigned long iflag = 0; 971 int found = 0; 972 973 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); 974 list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list); 975 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); 976 while (!found && lpfc_cmd) { 977 if (lpfc_test_rrq_active(phba, ndlp, 978 lpfc_cmd->cur_iocbq.sli4_xritag)) { 979 lpfc_release_scsi_buf_s4(phba, lpfc_cmd); 980 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); 981 list_remove_head(scsi_buf_list, lpfc_cmd, 982 struct lpfc_scsi_buf, list); 983 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, 984 iflag); 985 if (lpfc_cmd == start_lpfc_cmd) { 986 lpfc_cmd = NULL; 987 break; 988 } else 989 continue; 990 } 991 found = 1; 992 lpfc_cmd->seg_cnt = 0; 993 lpfc_cmd->nonsg_phys = 0; 994 lpfc_cmd->prot_seg_cnt = 0; 995 } 996 return lpfc_cmd; 997 } 998 /** 999 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA 1000 * @phba: The HBA for which this call is being executed. 1001 * 1002 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list 1003 * and returns to caller. 1004 * 1005 * Return codes: 1006 * NULL - Error 1007 * Pointer to lpfc_scsi_buf - Success 1008 **/ 1009 static struct lpfc_scsi_buf* 1010 lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 1011 { 1012 return phba->lpfc_get_scsi_buf(phba, ndlp); 1013 } 1014 1015 /** 1016 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list 1017 * @phba: The Hba for which this call is being executed. 1018 * @psb: The scsi buffer which is being released. 1019 * 1020 * This routine releases @psb scsi buffer by adding it to tail of @phba 1021 * lpfc_scsi_buf_list list. 1022 **/ 1023 static void 1024 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) 1025 { 1026 unsigned long iflag = 0; 1027 1028 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); 1029 psb->pCmd = NULL; 1030 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list); 1031 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); 1032 } 1033 1034 /** 1035 * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list. 1036 * @phba: The Hba for which this call is being executed. 1037 * @psb: The scsi buffer which is being released. 1038 * 1039 * This routine releases @psb scsi buffer by adding it to tail of @phba 1040 * lpfc_scsi_buf_list list. For SLI4 XRI's are tied to the scsi buffer 1041 * and cannot be reused for at least RA_TOV amount of time if it was 1042 * aborted. 
1043 **/ 1044 static void 1045 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) 1046 { 1047 unsigned long iflag = 0; 1048 1049 if (psb->exch_busy) { 1050 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, 1051 iflag); 1052 psb->pCmd = NULL; 1053 list_add_tail(&psb->list, 1054 &phba->sli4_hba.lpfc_abts_scsi_buf_list); 1055 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, 1056 iflag); 1057 } else { 1058 1059 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); 1060 psb->pCmd = NULL; 1061 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list); 1062 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); 1063 } 1064 } 1065 1066 /** 1067 * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list. 1068 * @phba: The Hba for which this call is being executed. 1069 * @psb: The scsi buffer which is being released. 1070 * 1071 * This routine releases @psb scsi buffer by adding it to tail of @phba 1072 * lpfc_scsi_buf_list list. 1073 **/ 1074 static void 1075 lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) 1076 { 1077 1078 phba->lpfc_release_scsi_buf(phba, psb); 1079 } 1080 1081 /** 1082 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec 1083 * @phba: The Hba for which this call is being executed. 1084 * @lpfc_cmd: The scsi buffer which is going to be mapped. 1085 * 1086 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd 1087 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans 1088 * through sg elements and format the bdea. This routine also initializes all 1089 * IOCB fields which are dependent on scsi command request buffer. 1090 * 1091 * Return codes: 1092 * 1 - Error 1093 * 0 - Success 1094 **/ 1095 static int 1096 lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) 1097 { 1098 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 1099 struct scatterlist *sgel = NULL; 1100 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 1101 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl; 1102 struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq; 1103 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 1104 struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde; 1105 dma_addr_t physaddr; 1106 uint32_t num_bde = 0; 1107 int nseg, datadir = scsi_cmnd->sc_data_direction; 1108 1109 /* 1110 * There are three possibilities here - use scatter-gather segment, use 1111 * the single mapping, or neither. Start the lpfc command prep by 1112 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first 1113 * data bde entry. 1114 */ 1115 bpl += 2; 1116 if (scsi_sg_count(scsi_cmnd)) { 1117 /* 1118 * The driver stores the segment count returned from pci_map_sg 1119 * because this a count of dma-mappings used to map the use_sg 1120 * pages. They are not guaranteed to be the same for those 1121 * architectures that implement an IOMMU. 1122 */ 1123 1124 nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd), 1125 scsi_sg_count(scsi_cmnd), datadir); 1126 if (unlikely(!nseg)) 1127 return 1; 1128 1129 lpfc_cmd->seg_cnt = nseg; 1130 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 1131 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1132 "9064 BLKGRD: %s: Too many sg segments from " 1133 "dma_map_sg. 
Config %d, seg_cnt %d\n", 1134 __func__, phba->cfg_sg_seg_cnt, 1135 lpfc_cmd->seg_cnt); 1136 scsi_dma_unmap(scsi_cmnd); 1137 return 1; 1138 } 1139 1140 /* 1141 * The driver established a maximum scatter-gather segment count 1142 * during probe that limits the number of sg elements in any 1143 * single scsi command. Just run through the seg_cnt and format 1144 * the bde's. 1145 * When using SLI-3 the driver will try to fit all the BDEs into 1146 * the IOCB. If it can't then the BDEs get added to a BPL as it 1147 * does for SLI-2 mode. 1148 */ 1149 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) { 1150 physaddr = sg_dma_address(sgel); 1151 if (phba->sli_rev == 3 && 1152 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 1153 !(iocbq->iocb_flag & DSS_SECURITY_OP) && 1154 nseg <= LPFC_EXT_DATA_BDE_COUNT) { 1155 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1156 data_bde->tus.f.bdeSize = sg_dma_len(sgel); 1157 data_bde->addrLow = putPaddrLow(physaddr); 1158 data_bde->addrHigh = putPaddrHigh(physaddr); 1159 data_bde++; 1160 } else { 1161 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1162 bpl->tus.f.bdeSize = sg_dma_len(sgel); 1163 bpl->tus.w = le32_to_cpu(bpl->tus.w); 1164 bpl->addrLow = 1165 le32_to_cpu(putPaddrLow(physaddr)); 1166 bpl->addrHigh = 1167 le32_to_cpu(putPaddrHigh(physaddr)); 1168 bpl++; 1169 } 1170 } 1171 } 1172 1173 /* 1174 * Finish initializing those IOCB fields that are dependent on the 1175 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is 1176 * explicitly reinitialized and for SLI-3 the extended bde count is 1177 * explicitly reinitialized since all iocb memory resources are reused. 1178 */ 1179 if (phba->sli_rev == 3 && 1180 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 1181 !(iocbq->iocb_flag & DSS_SECURITY_OP)) { 1182 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) { 1183 /* 1184 * The extended IOCB format can only fit 3 BDE or a BPL. 1185 * This I/O has more than 3 BDE so the 1st data bde will 1186 * be a BPL that is filled in here. 
1187 */ 1188 physaddr = lpfc_cmd->dma_handle; 1189 data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64; 1190 data_bde->tus.f.bdeSize = (num_bde * 1191 sizeof(struct ulp_bde64)); 1192 physaddr += (sizeof(struct fcp_cmnd) + 1193 sizeof(struct fcp_rsp) + 1194 (2 * sizeof(struct ulp_bde64))); 1195 data_bde->addrHigh = putPaddrHigh(physaddr); 1196 data_bde->addrLow = putPaddrLow(physaddr); 1197 /* ebde count includes the responce bde and data bpl */ 1198 iocb_cmd->unsli3.fcp_ext.ebde_count = 2; 1199 } else { 1200 /* ebde count includes the responce bde and data bdes */ 1201 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1); 1202 } 1203 } else { 1204 iocb_cmd->un.fcpi64.bdl.bdeSize = 1205 ((num_bde + 2) * sizeof(struct ulp_bde64)); 1206 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1); 1207 } 1208 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); 1209 1210 /* 1211 * Due to difference in data length between DIF/non-DIF paths, 1212 * we need to set word 4 of IOCB here 1213 */ 1214 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd); 1215 return 0; 1216 } 1217 1218 /* 1219 * Given a scsi cmnd, determine the BlockGuard opcodes to be used with it 1220 * @sc: The SCSI command to examine 1221 * @txopt: (out) BlockGuard operation for transmitted data 1222 * @rxopt: (out) BlockGuard operation for received data 1223 * 1224 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined 1225 * 1226 */ 1227 static int 1228 lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1229 uint8_t *txop, uint8_t *rxop) 1230 { 1231 uint8_t guard_type = scsi_host_get_guard(sc->device->host); 1232 uint8_t ret = 0; 1233 1234 if (guard_type == SHOST_DIX_GUARD_IP) { 1235 switch (scsi_get_prot_op(sc)) { 1236 case SCSI_PROT_READ_INSERT: 1237 case SCSI_PROT_WRITE_STRIP: 1238 *txop = BG_OP_IN_CSUM_OUT_NODIF; 1239 *rxop = BG_OP_IN_NODIF_OUT_CSUM; 1240 break; 1241 1242 case SCSI_PROT_READ_STRIP: 1243 case SCSI_PROT_WRITE_INSERT: 1244 *txop = BG_OP_IN_NODIF_OUT_CRC; 1245 *rxop = BG_OP_IN_CRC_OUT_NODIF; 1246 break; 1247 1248 case SCSI_PROT_READ_PASS: 1249 case SCSI_PROT_WRITE_PASS: 1250 *txop = BG_OP_IN_CSUM_OUT_CRC; 1251 *rxop = BG_OP_IN_CRC_OUT_CSUM; 1252 break; 1253 1254 case SCSI_PROT_NORMAL: 1255 default: 1256 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1257 "9063 BLKGRD: Bad op/guard:%d/%d combination\n", 1258 scsi_get_prot_op(sc), guard_type); 1259 ret = 1; 1260 break; 1261 1262 } 1263 } else if (guard_type == SHOST_DIX_GUARD_CRC) { 1264 switch (scsi_get_prot_op(sc)) { 1265 case SCSI_PROT_READ_STRIP: 1266 case SCSI_PROT_WRITE_INSERT: 1267 *txop = BG_OP_IN_NODIF_OUT_CRC; 1268 *rxop = BG_OP_IN_CRC_OUT_NODIF; 1269 break; 1270 1271 case SCSI_PROT_READ_PASS: 1272 case SCSI_PROT_WRITE_PASS: 1273 *txop = BG_OP_IN_CRC_OUT_CRC; 1274 *rxop = BG_OP_IN_CRC_OUT_CRC; 1275 break; 1276 1277 case SCSI_PROT_READ_INSERT: 1278 case SCSI_PROT_WRITE_STRIP: 1279 case SCSI_PROT_NORMAL: 1280 default: 1281 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1282 "9075 BLKGRD: Bad op/guard:%d/%d combination\n", 1283 scsi_get_prot_op(sc), guard_type); 1284 ret = 1; 1285 break; 1286 } 1287 } else { 1288 /* unsupported format */ 1289 BUG(); 1290 } 1291 1292 return ret; 1293 } 1294 1295 struct scsi_dif_tuple { 1296 __be16 guard_tag; /* Checksum */ 1297 __be16 app_tag; /* Opaque storage */ 1298 __be32 ref_tag; /* Target LBA or indirect LBA */ 1299 }; 1300 1301 static inline unsigned 1302 lpfc_cmd_blksize(struct scsi_cmnd *sc) 1303 { 1304 return sc->device->sector_size; 1305 } 1306 1307 /** 1308 * lpfc_get_cmd_dif_parms - Extract DIF 
parameters from SCSI command 1309 * @sc: in: SCSI command 1310 * @apptagmask: out: app tag mask 1311 * @apptagval: out: app tag value 1312 * @reftag: out: ref tag (reference tag) 1313 * 1314 * Description: 1315 * Extract DIF parameters from the command if possible. Otherwise, 1316 * use default parameters. 1317 * 1318 **/ 1319 static inline void 1320 lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask, 1321 uint16_t *apptagval, uint32_t *reftag) 1322 { 1323 struct scsi_dif_tuple *spt; 1324 unsigned char op = scsi_get_prot_op(sc); 1325 unsigned int protcnt = scsi_prot_sg_count(sc); 1326 static int cnt; 1327 1328 if (protcnt && (op == SCSI_PROT_WRITE_STRIP || 1329 op == SCSI_PROT_WRITE_PASS)) { 1330 1331 cnt++; 1332 spt = page_address(sg_page(scsi_prot_sglist(sc))) + 1333 scsi_prot_sglist(sc)[0].offset; 1334 *apptagmask = 0; 1335 *apptagval = 0; 1336 *reftag = cpu_to_be32(spt->ref_tag); 1337 1338 } else { 1339 /* SBC defines ref tag to be lower 32bits of LBA */ 1340 *reftag = (uint32_t) (0xffffffff & scsi_get_lba(sc)); 1341 *apptagmask = 0; 1342 *apptagval = 0; 1343 } 1344 } 1345 1346 /* 1347 * This function sets up buffer list for protection groups of 1348 * type LPFC_PG_TYPE_NO_DIF 1349 * 1350 * This is usually used when the HBA is instructed to generate 1351 * DIFs and insert them into data stream (or strip DIF from 1352 * incoming data stream) 1353 * 1354 * The buffer list consists of just one protection group described 1355 * below: 1356 * +-------------------------+ 1357 * start of prot group --> | PDE_5 | 1358 * +-------------------------+ 1359 * | PDE_6 | 1360 * +-------------------------+ 1361 * | Data BDE | 1362 * +-------------------------+ 1363 * |more Data BDE's ... (opt)| 1364 * +-------------------------+ 1365 * 1366 * @sc: pointer to scsi command we're working on 1367 * @bpl: pointer to buffer list for protection groups 1368 * @datacnt: number of segments of data that have been dma mapped 1369 * 1370 * Note: Data s/g buffers have been dma mapped 1371 */ 1372 static int 1373 lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1374 struct ulp_bde64 *bpl, int datasegcnt) 1375 { 1376 struct scatterlist *sgde = NULL; /* s/g data entry */ 1377 struct lpfc_pde5 *pde5 = NULL; 1378 struct lpfc_pde6 *pde6 = NULL; 1379 dma_addr_t physaddr; 1380 int i = 0, num_bde = 0, status; 1381 int datadir = sc->sc_data_direction; 1382 unsigned blksize; 1383 uint32_t reftag; 1384 uint16_t apptagmask, apptagval; 1385 uint8_t txop, rxop; 1386 1387 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); 1388 if (status) 1389 goto out; 1390 1391 /* extract some info from the scsi command for pde*/ 1392 blksize = lpfc_cmd_blksize(sc); 1393 lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag); 1394 1395 /* setup PDE5 with what we have */ 1396 pde5 = (struct lpfc_pde5 *) bpl; 1397 memset(pde5, 0, sizeof(struct lpfc_pde5)); 1398 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); 1399 pde5->reftag = reftag; 1400 1401 /* Endianness conversion if necessary for PDE5 */ 1402 pde5->word0 = cpu_to_le32(pde5->word0); 1403 pde5->reftag = cpu_to_le32(pde5->reftag); 1404 1405 /* advance bpl and increment bde count */ 1406 num_bde++; 1407 bpl++; 1408 pde6 = (struct lpfc_pde6 *) bpl; 1409 1410 /* setup PDE6 with the rest of the info */ 1411 memset(pde6, 0, sizeof(struct lpfc_pde6)); 1412 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); 1413 bf_set(pde6_optx, pde6, txop); 1414 bf_set(pde6_oprx, pde6, rxop); 1415 if (datadir == DMA_FROM_DEVICE) { 1416 bf_set(pde6_ce, pde6, 1); 1417 
bf_set(pde6_re, pde6, 1); 1418 bf_set(pde6_ae, pde6, 1); 1419 } 1420 bf_set(pde6_ai, pde6, 1); 1421 bf_set(pde6_apptagval, pde6, apptagval); 1422 1423 /* Endianness conversion if necessary for PDE6 */ 1424 pde6->word0 = cpu_to_le32(pde6->word0); 1425 pde6->word1 = cpu_to_le32(pde6->word1); 1426 pde6->word2 = cpu_to_le32(pde6->word2); 1427 1428 /* advance bpl and increment bde count */ 1429 num_bde++; 1430 bpl++; 1431 1432 /* assumption: caller has already run dma_map_sg on command data */ 1433 scsi_for_each_sg(sc, sgde, datasegcnt, i) { 1434 physaddr = sg_dma_address(sgde); 1435 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr)); 1436 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); 1437 bpl->tus.f.bdeSize = sg_dma_len(sgde); 1438 if (datadir == DMA_TO_DEVICE) 1439 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1440 else 1441 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; 1442 bpl->tus.w = le32_to_cpu(bpl->tus.w); 1443 bpl++; 1444 num_bde++; 1445 } 1446 1447 out: 1448 return num_bde; 1449 } 1450 1451 /* 1452 * This function sets up buffer list for protection groups of 1453 * type LPFC_PG_TYPE_DIF_BUF 1454 * 1455 * This is usually used when DIFs are in their own buffers, 1456 * separate from the data. The HBA can then by instructed 1457 * to place the DIFs in the outgoing stream. For read operations, 1458 * The HBA could extract the DIFs and place it in DIF buffers. 1459 * 1460 * The buffer list for this type consists of one or more of the 1461 * protection groups described below: 1462 * +-------------------------+ 1463 * start of first prot group --> | PDE_5 | 1464 * +-------------------------+ 1465 * | PDE_6 | 1466 * +-------------------------+ 1467 * | PDE_7 (Prot BDE) | 1468 * +-------------------------+ 1469 * | Data BDE | 1470 * +-------------------------+ 1471 * |more Data BDE's ... (opt)| 1472 * +-------------------------+ 1473 * start of new prot group --> | PDE_5 | 1474 * +-------------------------+ 1475 * | ... 
| 1476 * +-------------------------+ 1477 * 1478 * @sc: pointer to scsi command we're working on 1479 * @bpl: pointer to buffer list for protection groups 1480 * @datacnt: number of segments of data that have been dma mapped 1481 * @protcnt: number of segment of protection data that have been dma mapped 1482 * 1483 * Note: It is assumed that both data and protection s/g buffers have been 1484 * mapped for DMA 1485 */ 1486 static int 1487 lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1488 struct ulp_bde64 *bpl, int datacnt, int protcnt) 1489 { 1490 struct scatterlist *sgde = NULL; /* s/g data entry */ 1491 struct scatterlist *sgpe = NULL; /* s/g prot entry */ 1492 struct lpfc_pde5 *pde5 = NULL; 1493 struct lpfc_pde6 *pde6 = NULL; 1494 struct ulp_bde64 *prot_bde = NULL; 1495 dma_addr_t dataphysaddr, protphysaddr; 1496 unsigned short curr_data = 0, curr_prot = 0; 1497 unsigned int split_offset, protgroup_len; 1498 unsigned int protgrp_blks, protgrp_bytes; 1499 unsigned int remainder, subtotal; 1500 int status; 1501 int datadir = sc->sc_data_direction; 1502 unsigned char pgdone = 0, alldone = 0; 1503 unsigned blksize; 1504 uint32_t reftag; 1505 uint16_t apptagmask, apptagval; 1506 uint8_t txop, rxop; 1507 int num_bde = 0; 1508 1509 sgpe = scsi_prot_sglist(sc); 1510 sgde = scsi_sglist(sc); 1511 1512 if (!sgpe || !sgde) { 1513 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1514 "9020 Invalid s/g entry: data=0x%p prot=0x%p\n", 1515 sgpe, sgde); 1516 return 0; 1517 } 1518 1519 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); 1520 if (status) 1521 goto out; 1522 1523 /* extract some info from the scsi command */ 1524 blksize = lpfc_cmd_blksize(sc); 1525 lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag); 1526 1527 split_offset = 0; 1528 do { 1529 /* setup PDE5 with what we have */ 1530 pde5 = (struct lpfc_pde5 *) bpl; 1531 memset(pde5, 0, sizeof(struct lpfc_pde5)); 1532 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); 1533 pde5->reftag = reftag; 1534 1535 /* Endianness conversion if necessary for PDE5 */ 1536 pde5->word0 = cpu_to_le32(pde5->word0); 1537 pde5->reftag = cpu_to_le32(pde5->reftag); 1538 1539 /* advance bpl and increment bde count */ 1540 num_bde++; 1541 bpl++; 1542 pde6 = (struct lpfc_pde6 *) bpl; 1543 1544 /* setup PDE6 with the rest of the info */ 1545 memset(pde6, 0, sizeof(struct lpfc_pde6)); 1546 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); 1547 bf_set(pde6_optx, pde6, txop); 1548 bf_set(pde6_oprx, pde6, rxop); 1549 bf_set(pde6_ce, pde6, 1); 1550 bf_set(pde6_re, pde6, 1); 1551 bf_set(pde6_ae, pde6, 1); 1552 bf_set(pde6_ai, pde6, 1); 1553 bf_set(pde6_apptagval, pde6, apptagval); 1554 1555 /* Endianness conversion if necessary for PDE6 */ 1556 pde6->word0 = cpu_to_le32(pde6->word0); 1557 pde6->word1 = cpu_to_le32(pde6->word1); 1558 pde6->word2 = cpu_to_le32(pde6->word2); 1559 1560 /* advance bpl and increment bde count */ 1561 num_bde++; 1562 bpl++; 1563 1564 /* setup the first BDE that points to protection buffer */ 1565 prot_bde = (struct ulp_bde64 *) bpl; 1566 protphysaddr = sg_dma_address(sgpe); 1567 prot_bde->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr)); 1568 prot_bde->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr)); 1569 protgroup_len = sg_dma_len(sgpe); 1570 1571 /* must be integer multiple of the DIF block length */ 1572 BUG_ON(protgroup_len % 8); 1573 1574 protgrp_blks = protgroup_len / 8; 1575 protgrp_bytes = protgrp_blks * blksize; 1576 1577 prot_bde->tus.f.bdeSize = protgroup_len; 1578 prot_bde->tus.f.bdeFlags = 
LPFC_PDE7_DESCRIPTOR; 1579 prot_bde->tus.w = le32_to_cpu(bpl->tus.w); 1580 1581 curr_prot++; 1582 num_bde++; 1583 1584 /* setup BDE's for data blocks associated with DIF data */ 1585 pgdone = 0; 1586 subtotal = 0; /* total bytes processed for current prot grp */ 1587 while (!pgdone) { 1588 if (!sgde) { 1589 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1590 "9065 BLKGRD:%s Invalid data segment\n", 1591 __func__); 1592 return 0; 1593 } 1594 bpl++; 1595 dataphysaddr = sg_dma_address(sgde) + split_offset; 1596 bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr)); 1597 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr)); 1598 1599 remainder = sg_dma_len(sgde) - split_offset; 1600 1601 if ((subtotal + remainder) <= protgrp_bytes) { 1602 /* we can use this whole buffer */ 1603 bpl->tus.f.bdeSize = remainder; 1604 split_offset = 0; 1605 1606 if ((subtotal + remainder) == protgrp_bytes) 1607 pgdone = 1; 1608 } else { 1609 /* must split this buffer with next prot grp */ 1610 bpl->tus.f.bdeSize = protgrp_bytes - subtotal; 1611 split_offset += bpl->tus.f.bdeSize; 1612 } 1613 1614 subtotal += bpl->tus.f.bdeSize; 1615 1616 if (datadir == DMA_TO_DEVICE) 1617 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 1618 else 1619 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; 1620 bpl->tus.w = le32_to_cpu(bpl->tus.w); 1621 1622 num_bde++; 1623 curr_data++; 1624 1625 if (split_offset) 1626 break; 1627 1628 /* Move to the next s/g segment if possible */ 1629 sgde = sg_next(sgde); 1630 1631 } 1632 1633 /* are we done ? */ 1634 if (curr_prot == protcnt) { 1635 alldone = 1; 1636 } else if (curr_prot < protcnt) { 1637 /* advance to next prot buffer */ 1638 sgpe = sg_next(sgpe); 1639 bpl++; 1640 1641 /* update the reference tag */ 1642 reftag += protgrp_blks; 1643 } else { 1644 /* if we're here, we have a bug */ 1645 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1646 "9054 BLKGRD: bug in %s\n", __func__); 1647 } 1648 1649 } while (!alldone); 1650 1651 out: 1652 1653 return num_bde; 1654 } 1655 /* 1656 * Given a SCSI command that supports DIF, determine composition of protection 1657 * groups involved in setting up buffer lists 1658 * 1659 * Returns: 1660 * for DIF (for both read and write) 1661 * */ 1662 static int 1663 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc) 1664 { 1665 int ret = LPFC_PG_TYPE_INVALID; 1666 unsigned char op = scsi_get_prot_op(sc); 1667 1668 switch (op) { 1669 case SCSI_PROT_READ_STRIP: 1670 case SCSI_PROT_WRITE_INSERT: 1671 ret = LPFC_PG_TYPE_NO_DIF; 1672 break; 1673 case SCSI_PROT_READ_INSERT: 1674 case SCSI_PROT_WRITE_STRIP: 1675 case SCSI_PROT_READ_PASS: 1676 case SCSI_PROT_WRITE_PASS: 1677 ret = LPFC_PG_TYPE_DIF_BUF; 1678 break; 1679 default: 1680 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1681 "9021 Unsupported protection op:%d\n", op); 1682 break; 1683 } 1684 1685 return ret; 1686 } 1687 1688 /* 1689 * This is the protection/DIF aware version of 1690 * lpfc_scsi_prep_dma_buf(). 
It may be a good idea to combine the 1691 * two functions eventually, but for now, it's here 1692 */ 1693 static int 1694 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, 1695 struct lpfc_scsi_buf *lpfc_cmd) 1696 { 1697 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 1698 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 1699 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl; 1700 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 1701 uint32_t num_bde = 0; 1702 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; 1703 int prot_group_type = 0; 1704 int diflen, fcpdl; 1705 unsigned blksize; 1706 1707 /* 1708 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd 1709 * fcp_rsp regions to the first data bde entry 1710 */ 1711 bpl += 2; 1712 if (scsi_sg_count(scsi_cmnd)) { 1713 /* 1714 * The driver stores the segment count returned from pci_map_sg 1715 * because this a count of dma-mappings used to map the use_sg 1716 * pages. They are not guaranteed to be the same for those 1717 * architectures that implement an IOMMU. 1718 */ 1719 datasegcnt = dma_map_sg(&phba->pcidev->dev, 1720 scsi_sglist(scsi_cmnd), 1721 scsi_sg_count(scsi_cmnd), datadir); 1722 if (unlikely(!datasegcnt)) 1723 return 1; 1724 1725 lpfc_cmd->seg_cnt = datasegcnt; 1726 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 1727 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1728 "9067 BLKGRD: %s: Too many sg segments" 1729 " from dma_map_sg. Config %d, seg_cnt" 1730 " %d\n", 1731 __func__, phba->cfg_sg_seg_cnt, 1732 lpfc_cmd->seg_cnt); 1733 scsi_dma_unmap(scsi_cmnd); 1734 return 1; 1735 } 1736 1737 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); 1738 1739 switch (prot_group_type) { 1740 case LPFC_PG_TYPE_NO_DIF: 1741 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl, 1742 datasegcnt); 1743 /* we should have 2 or more entries in buffer list */ 1744 if (num_bde < 2) 1745 goto err; 1746 break; 1747 case LPFC_PG_TYPE_DIF_BUF:{ 1748 /* 1749 * This type indicates that protection buffers are 1750 * passed to the driver, so that needs to be prepared 1751 * for DMA 1752 */ 1753 protsegcnt = dma_map_sg(&phba->pcidev->dev, 1754 scsi_prot_sglist(scsi_cmnd), 1755 scsi_prot_sg_count(scsi_cmnd), datadir); 1756 if (unlikely(!protsegcnt)) { 1757 scsi_dma_unmap(scsi_cmnd); 1758 return 1; 1759 } 1760 1761 lpfc_cmd->prot_seg_cnt = protsegcnt; 1762 if (lpfc_cmd->prot_seg_cnt 1763 > phba->cfg_prot_sg_seg_cnt) { 1764 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1765 "9068 BLKGRD: %s: Too many prot sg " 1766 "segments from dma_map_sg. Config %d," 1767 "prot_seg_cnt %d\n", __func__, 1768 phba->cfg_prot_sg_seg_cnt, 1769 lpfc_cmd->prot_seg_cnt); 1770 dma_unmap_sg(&phba->pcidev->dev, 1771 scsi_prot_sglist(scsi_cmnd), 1772 scsi_prot_sg_count(scsi_cmnd), 1773 datadir); 1774 scsi_dma_unmap(scsi_cmnd); 1775 return 1; 1776 } 1777 1778 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl, 1779 datasegcnt, protsegcnt); 1780 /* we should have 3 or more entries in buffer list */ 1781 if (num_bde < 3) 1782 goto err; 1783 break; 1784 } 1785 case LPFC_PG_TYPE_INVALID: 1786 default: 1787 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1788 "9022 Unexpected protection group %i\n", 1789 prot_group_type); 1790 return 1; 1791 } 1792 } 1793 1794 /* 1795 * Finish initializing those IOCB fields that are dependent on the 1796 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly 1797 * reinitialized since all iocb memory resources are used many times 1798 * for transmit, receive, and continuation bpl's. 
1799 */ 1800 iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); 1801 iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64)); 1802 iocb_cmd->ulpBdeCount = 1; 1803 iocb_cmd->ulpLe = 1; 1804 1805 fcpdl = scsi_bufflen(scsi_cmnd); 1806 1807 if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) { 1808 /* 1809 * We are in DIF Type 1 mode 1810 * Every data block has a 8 byte DIF (trailer) 1811 * attached to it. Must ajust FCP data length 1812 */ 1813 blksize = lpfc_cmd_blksize(scsi_cmnd); 1814 diflen = (fcpdl / blksize) * 8; 1815 fcpdl += diflen; 1816 } 1817 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); 1818 1819 /* 1820 * Due to difference in data length between DIF/non-DIF paths, 1821 * we need to set word 4 of IOCB here 1822 */ 1823 iocb_cmd->un.fcpi.fcpi_parm = fcpdl; 1824 1825 return 0; 1826 err: 1827 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1828 "9023 Could not setup all needed BDE's" 1829 "prot_group_type=%d, num_bde=%d\n", 1830 prot_group_type, num_bde); 1831 return 1; 1832 } 1833 1834 /* 1835 * This function checks for BlockGuard errors detected by 1836 * the HBA. In case of errors, the ASC/ASCQ fields in the 1837 * sense buffer will be set accordingly, paired with 1838 * ILLEGAL_REQUEST to signal to the kernel that the HBA 1839 * detected corruption. 1840 * 1841 * Returns: 1842 * 0 - No error found 1843 * 1 - BlockGuard error found 1844 * -1 - Internal error (bad profile, ...etc) 1845 */ 1846 static int 1847 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, 1848 struct lpfc_iocbq *pIocbOut) 1849 { 1850 struct scsi_cmnd *cmd = lpfc_cmd->pCmd; 1851 struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg; 1852 int ret = 0; 1853 uint32_t bghm = bgf->bghm; 1854 uint32_t bgstat = bgf->bgstat; 1855 uint64_t failing_sector = 0; 1856 1857 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd" 1858 " 0x%x lba 0x%llx blk cnt 0x%x " 1859 "bgstat=0x%x bghm=0x%x\n", 1860 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd), 1861 blk_rq_sectors(cmd->request), bgstat, bghm); 1862 1863 spin_lock(&_dump_buf_lock); 1864 if (!_dump_buf_done) { 1865 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving" 1866 " Data for %u blocks to debugfs\n", 1867 (cmd->cmnd[7] << 8 | cmd->cmnd[8])); 1868 lpfc_debug_save_data(phba, cmd); 1869 1870 /* If we have a prot sgl, save the DIF buffer */ 1871 if (lpfc_prot_group_type(phba, cmd) == 1872 LPFC_PG_TYPE_DIF_BUF) { 1873 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: " 1874 "Saving DIF for %u blocks to debugfs\n", 1875 (cmd->cmnd[7] << 8 | cmd->cmnd[8])); 1876 lpfc_debug_save_dif(phba, cmd); 1877 } 1878 1879 _dump_buf_done = 1; 1880 } 1881 spin_unlock(&_dump_buf_lock); 1882 1883 if (lpfc_bgs_get_invalid_prof(bgstat)) { 1884 cmd->result = ScsiResult(DID_ERROR, 0); 1885 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid" 1886 " BlockGuard profile. bgstat:0x%x\n", 1887 bgstat); 1888 ret = (-1); 1889 goto out; 1890 } 1891 1892 if (lpfc_bgs_get_uninit_dif_block(bgstat)) { 1893 cmd->result = ScsiResult(DID_ERROR, 0); 1894 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: " 1895 "Invalid BlockGuard DIF Block. 
bgstat:0x%x\n", 1896 bgstat); 1897 ret = (-1); 1898 goto out; 1899 } 1900 1901 if (lpfc_bgs_get_guard_err(bgstat)) { 1902 ret = 1; 1903 1904 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1905 0x10, 0x1); 1906 cmd->result = DRIVER_SENSE << 24 1907 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); 1908 phba->bg_guard_err_cnt++; 1909 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1910 "9055 BLKGRD: guard_tag error\n"); 1911 } 1912 1913 if (lpfc_bgs_get_reftag_err(bgstat)) { 1914 ret = 1; 1915 1916 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1917 0x10, 0x3); 1918 cmd->result = DRIVER_SENSE << 24 1919 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); 1920 1921 phba->bg_reftag_err_cnt++; 1922 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1923 "9056 BLKGRD: ref_tag error\n"); 1924 } 1925 1926 if (lpfc_bgs_get_apptag_err(bgstat)) { 1927 ret = 1; 1928 1929 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1930 0x10, 0x2); 1931 cmd->result = DRIVER_SENSE << 24 1932 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); 1933 1934 phba->bg_apptag_err_cnt++; 1935 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1936 "9061 BLKGRD: app_tag error\n"); 1937 } 1938 1939 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { 1940 /* 1941 * setup sense data descriptor 0 per SPC-4 as an information 1942 * field, and put the failing LBA in it 1943 */ 1944 cmd->sense_buffer[8] = 0; /* Information */ 1945 cmd->sense_buffer[9] = 0xa; /* Add. length */ 1946 bghm /= cmd->device->sector_size; 1947 1948 failing_sector = scsi_get_lba(cmd); 1949 failing_sector += bghm; 1950 1951 put_unaligned_be64(failing_sector, &cmd->sense_buffer[10]); 1952 } 1953 1954 if (!ret) { 1955 /* No error was reported - problem in FW? */ 1956 cmd->result = ScsiResult(DID_ERROR, 0); 1957 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1958 "9057 BLKGRD: no errors reported!\n"); 1959 } 1960 1961 out: 1962 return ret; 1963 } 1964 1965 /** 1966 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec 1967 * @phba: The Hba for which this call is being executed. 1968 * @lpfc_cmd: The scsi buffer which is going to be mapped. 1969 * 1970 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd 1971 * field of @lpfc_cmd for device with SLI-4 interface spec. 1972 * 1973 * Return codes: 1974 * 1 - Error 1975 * 0 - Success 1976 **/ 1977 static int 1978 lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) 1979 { 1980 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 1981 struct scatterlist *sgel = NULL; 1982 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 1983 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; 1984 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 1985 dma_addr_t physaddr; 1986 uint32_t num_bde = 0; 1987 uint32_t dma_len; 1988 uint32_t dma_offset = 0; 1989 int nseg; 1990 1991 /* 1992 * There are three possibilities here - use scatter-gather segment, use 1993 * the single mapping, or neither. Start the lpfc command prep by 1994 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first 1995 * data bde entry. 1996 */ 1997 if (scsi_sg_count(scsi_cmnd)) { 1998 /* 1999 * The driver stores the segment count returned from pci_map_sg 2000 * because this a count of dma-mappings used to map the use_sg 2001 * pages. They are not guaranteed to be the same for those 2002 * architectures that implement an IOMMU. 
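 *
 * Illustrative example (hypothetical request): a command built from eight
 * discontiguous pages is handed down with scsi_sg_count(scsi_cmnd) == 8,
 * yet the call below
 *
 *     nseg = scsi_dma_map(scsi_cmnd);
 *
 * may legitimately return fewer than eight entries if an IOMMU coalesced
 * adjacent pages, which is why nseg (not the sg count) is stored below.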
2003 */ 2004 2005 nseg = scsi_dma_map(scsi_cmnd); 2006 if (unlikely(!nseg)) 2007 return 1; 2008 sgl += 1; 2009 /* clear the last flag in the fcp_rsp map entry */ 2010 sgl->word2 = le32_to_cpu(sgl->word2); 2011 bf_set(lpfc_sli4_sge_last, sgl, 0); 2012 sgl->word2 = cpu_to_le32(sgl->word2); 2013 sgl += 1; 2014 2015 lpfc_cmd->seg_cnt = nseg; 2016 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 2017 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:" 2018 " %s: Too many sg segments from " 2019 "dma_map_sg. Config %d, seg_cnt %d\n", 2020 __func__, phba->cfg_sg_seg_cnt, 2021 lpfc_cmd->seg_cnt); 2022 scsi_dma_unmap(scsi_cmnd); 2023 return 1; 2024 } 2025 2026 /* 2027 * The driver established a maximum scatter-gather segment count 2028 * during probe that limits the number of sg elements in any 2029 * single scsi command. Just run through the seg_cnt and format 2030 * the sge's. 2031 * When using SLI-3 the driver will try to fit all the BDEs into 2032 * the IOCB. If it can't then the BDEs get added to a BPL as it 2033 * does for SLI-2 mode. 2034 */ 2035 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) { 2036 physaddr = sg_dma_address(sgel); 2037 dma_len = sg_dma_len(sgel); 2038 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); 2039 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); 2040 if ((num_bde + 1) == nseg) 2041 bf_set(lpfc_sli4_sge_last, sgl, 1); 2042 else 2043 bf_set(lpfc_sli4_sge_last, sgl, 0); 2044 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 2045 sgl->word2 = cpu_to_le32(sgl->word2); 2046 sgl->sge_len = cpu_to_le32(dma_len); 2047 dma_offset += dma_len; 2048 sgl++; 2049 } 2050 } else { 2051 sgl += 1; 2052 /* clear the last flag in the fcp_rsp map entry */ 2053 sgl->word2 = le32_to_cpu(sgl->word2); 2054 bf_set(lpfc_sli4_sge_last, sgl, 1); 2055 sgl->word2 = cpu_to_le32(sgl->word2); 2056 } 2057 2058 /* 2059 * Finish initializing those IOCB fields that are dependent on the 2060 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is 2061 * explicitly reinitialized. 2062 * all iocb memory resources are reused. 2063 */ 2064 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); 2065 2066 /* 2067 * Due to difference in data length between DIF/non-DIF paths, 2068 * we need to set word 4 of IOCB here 2069 */ 2070 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd); 2071 return 0; 2072 } 2073 2074 /** 2075 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer 2076 * @phba: The Hba for which this call is being executed. 2077 * @lpfc_cmd: The scsi buffer which is going to be mapped. 2078 * 2079 * This routine wraps the actual DMA mapping function pointer from the 2080 * lpfc_hba struct. 2081 * 2082 * Return codes: 2083 * 1 - Error 2084 * 0 - Success 2085 **/ 2086 static inline int 2087 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) 2088 { 2089 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); 2090 } 2091 2092 /** 2093 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error 2094 * @phba: Pointer to hba context object. 2095 * @vport: Pointer to vport object. 2096 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error. 2097 * @rsp_iocb: Pointer to response iocb object which reported error. 2098 * 2099 * This function posts an event when there is a SCSI command reporting 2100 * error from the scsi device. 
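 *
 * Roughly, and mirroring the checks in the body below, an event is queued
 * only when one of the following holds:
 *
 *   cmnd->result == SAM_STAT_TASK_SET_FULL or SAM_STAT_BUSY      (queue full / busy)
 *   (resp_info & SNS_LEN_VALID) && rspSnsLen on READ_10/WRITE_10 (check condition)
 *   fcpi_parm set with a rspResId/fcpi_parm mismatch on a read   (read check error)
 *
 * Anything else returns without posting to the worker thread.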
2101 **/ 2102 static void 2103 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport, 2104 struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) { 2105 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; 2106 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; 2107 uint32_t resp_info = fcprsp->rspStatus2; 2108 uint32_t scsi_status = fcprsp->rspStatus3; 2109 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm; 2110 struct lpfc_fast_path_event *fast_path_evt = NULL; 2111 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode; 2112 unsigned long flags; 2113 2114 if (!pnode || !NLP_CHK_NODE_ACT(pnode)) 2115 return; 2116 2117 /* If there is queuefull or busy condition send a scsi event */ 2118 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) || 2119 (cmnd->result == SAM_STAT_BUSY)) { 2120 fast_path_evt = lpfc_alloc_fast_evt(phba); 2121 if (!fast_path_evt) 2122 return; 2123 fast_path_evt->un.scsi_evt.event_type = 2124 FC_REG_SCSI_EVENT; 2125 fast_path_evt->un.scsi_evt.subcategory = 2126 (cmnd->result == SAM_STAT_TASK_SET_FULL) ? 2127 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY; 2128 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun; 2129 memcpy(&fast_path_evt->un.scsi_evt.wwpn, 2130 &pnode->nlp_portname, sizeof(struct lpfc_name)); 2131 memcpy(&fast_path_evt->un.scsi_evt.wwnn, 2132 &pnode->nlp_nodename, sizeof(struct lpfc_name)); 2133 } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen && 2134 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) { 2135 fast_path_evt = lpfc_alloc_fast_evt(phba); 2136 if (!fast_path_evt) 2137 return; 2138 fast_path_evt->un.check_cond_evt.scsi_event.event_type = 2139 FC_REG_SCSI_EVENT; 2140 fast_path_evt->un.check_cond_evt.scsi_event.subcategory = 2141 LPFC_EVENT_CHECK_COND; 2142 fast_path_evt->un.check_cond_evt.scsi_event.lun = 2143 cmnd->device->lun; 2144 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn, 2145 &pnode->nlp_portname, sizeof(struct lpfc_name)); 2146 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn, 2147 &pnode->nlp_nodename, sizeof(struct lpfc_name)); 2148 fast_path_evt->un.check_cond_evt.sense_key = 2149 cmnd->sense_buffer[2] & 0xf; 2150 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12]; 2151 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13]; 2152 } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) && 2153 fcpi_parm && 2154 ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) || 2155 ((scsi_status == SAM_STAT_GOOD) && 2156 !(resp_info & (RESID_UNDER | RESID_OVER))))) { 2157 /* 2158 * If status is good or resid does not match with fcp_param and 2159 * there is valid fcpi_parm, then there is a read_check error 2160 */ 2161 fast_path_evt = lpfc_alloc_fast_evt(phba); 2162 if (!fast_path_evt) 2163 return; 2164 fast_path_evt->un.read_check_error.header.event_type = 2165 FC_REG_FABRIC_EVENT; 2166 fast_path_evt->un.read_check_error.header.subcategory = 2167 LPFC_EVENT_FCPRDCHKERR; 2168 memcpy(&fast_path_evt->un.read_check_error.header.wwpn, 2169 &pnode->nlp_portname, sizeof(struct lpfc_name)); 2170 memcpy(&fast_path_evt->un.read_check_error.header.wwnn, 2171 &pnode->nlp_nodename, sizeof(struct lpfc_name)); 2172 fast_path_evt->un.read_check_error.lun = cmnd->device->lun; 2173 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0]; 2174 fast_path_evt->un.read_check_error.fcpiparam = 2175 fcpi_parm; 2176 } else 2177 return; 2178 2179 fast_path_evt->vport = vport; 2180 spin_lock_irqsave(&phba->hbalock, flags); 2181 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list); 2182 
spin_unlock_irqrestore(&phba->hbalock, flags);
2183 lpfc_worker_wake_up(phba);
2184 return;
2185 }
2186
2187 /**
2188 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
2189 * @phba: The HBA for which this call is being executed.
2190 * @psb: The scsi buffer which is going to be un-mapped.
2191 *
2192 * This routine does DMA un-mapping of the scatter-gather list of the scsi
2193 * command held in @psb for a device with SLI-3 interface spec.
2194 **/
2195 static void
2196 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
2197 {
2198 /*
2199 * There are only two special cases to consider. (1) the scsi command
2200 * requested scatter-gather usage or (2) the scsi command allocated
2201 * a request buffer, but did not request use_sg. There is a third
2202 * case, but it does not require resource deallocation.
2203 */
2204 if (psb->seg_cnt > 0)
2205 scsi_dma_unmap(psb->pCmd);
2206 if (psb->prot_seg_cnt > 0)
2207 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
2208 scsi_prot_sg_count(psb->pCmd),
2209 psb->pCmd->sc_data_direction);
2210 }
2211
2212 /**
2213 * lpfc_handle_fcp_err - FCP response handler
2214 * @vport: The virtual port for which this call is being executed.
2215 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2216 * @rsp_iocb: The response IOCB which contains the FCP error.
2217 *
2218 * This routine is called to process a response IOCB with status field
2219 * IOSTAT_FCP_RSP_ERROR. It sets the result field of the scsi command
2220 * based upon the SCSI and FCP error.
2221 **/
2222 static void
2223 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2224 struct lpfc_iocbq *rsp_iocb)
2225 {
2226 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
2227 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
2228 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
2229 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
2230 uint32_t resp_info = fcprsp->rspStatus2;
2231 uint32_t scsi_status = fcprsp->rspStatus3;
2232 uint32_t *lp;
2233 uint32_t host_status = DID_OK;
2234 uint32_t rsplen = 0;
2235 uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
2236
2237
2238 /*
2239 * If this is a task management command, there is no
2240 * scsi packet associated with this lpfc_cmd. The driver
2241 * consumes it.
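 *
 * Illustrative note: task management frames are recognised here purely by
 * a non-zero fcpCntl2, e.g. the FCP_LUN_RESET value stored by
 * lpfc_scsi_prep_task_mgmt_cmd(), so no midlayer result is derived for them.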
2242 */ 2243 if (fcpcmd->fcpCntl2) { 2244 scsi_status = 0; 2245 goto out; 2246 } 2247 2248 if (resp_info & RSP_LEN_VALID) { 2249 rsplen = be32_to_cpu(fcprsp->rspRspLen); 2250 if (rsplen != 0 && rsplen != 4 && rsplen != 8) { 2251 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 2252 "2719 Invalid response length: " 2253 "tgt x%x lun x%x cmnd x%x rsplen x%x\n", 2254 cmnd->device->id, 2255 cmnd->device->lun, cmnd->cmnd[0], 2256 rsplen); 2257 host_status = DID_ERROR; 2258 goto out; 2259 } 2260 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) { 2261 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 2262 "2757 Protocol failure detected during " 2263 "processing of FCP I/O op: " 2264 "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n", 2265 cmnd->device->id, 2266 cmnd->device->lun, cmnd->cmnd[0], 2267 fcprsp->rspInfo3); 2268 host_status = DID_ERROR; 2269 goto out; 2270 } 2271 } 2272 2273 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) { 2274 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen); 2275 if (snslen > SCSI_SENSE_BUFFERSIZE) 2276 snslen = SCSI_SENSE_BUFFERSIZE; 2277 2278 if (resp_info & RSP_LEN_VALID) 2279 rsplen = be32_to_cpu(fcprsp->rspRspLen); 2280 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen); 2281 } 2282 lp = (uint32_t *)cmnd->sense_buffer; 2283 2284 if (!scsi_status && (resp_info & RESID_UNDER)) 2285 logit = LOG_FCP; 2286 2287 lpfc_printf_vlog(vport, KERN_WARNING, logit, 2288 "9024 FCP command x%x failed: x%x SNS x%x x%x " 2289 "Data: x%x x%x x%x x%x x%x\n", 2290 cmnd->cmnd[0], scsi_status, 2291 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info, 2292 be32_to_cpu(fcprsp->rspResId), 2293 be32_to_cpu(fcprsp->rspSnsLen), 2294 be32_to_cpu(fcprsp->rspRspLen), 2295 fcprsp->rspInfo3); 2296 2297 scsi_set_resid(cmnd, 0); 2298 if (resp_info & RESID_UNDER) { 2299 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId)); 2300 2301 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 2302 "9025 FCP Read Underrun, expected %d, " 2303 "residual %d Data: x%x x%x x%x\n", 2304 be32_to_cpu(fcpcmd->fcpDl), 2305 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0], 2306 cmnd->underflow); 2307 2308 /* 2309 * If there is an under run check if under run reported by 2310 * storage array is same as the under run reported by HBA. 2311 * If this is not same, there is a dropped frame. 2312 */ 2313 if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) && 2314 fcpi_parm && 2315 (scsi_get_resid(cmnd) != fcpi_parm)) { 2316 lpfc_printf_vlog(vport, KERN_WARNING, 2317 LOG_FCP | LOG_FCP_ERROR, 2318 "9026 FCP Read Check Error " 2319 "and Underrun Data: x%x x%x x%x x%x\n", 2320 be32_to_cpu(fcpcmd->fcpDl), 2321 scsi_get_resid(cmnd), fcpi_parm, 2322 cmnd->cmnd[0]); 2323 scsi_set_resid(cmnd, scsi_bufflen(cmnd)); 2324 host_status = DID_ERROR; 2325 } 2326 /* 2327 * The cmnd->underflow is the minimum number of bytes that must 2328 * be transfered for this command. Provided a sense condition 2329 * is not present, make sure the actual amount transferred is at 2330 * least the underflow value or fail. 
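 *
 * Worked example (hypothetical numbers): for a 64 KiB read with
 * cmnd->underflow = 65536 and a reported residual of 4096,
 * scsi_bufflen() - scsi_get_resid() = 61440 < 65536, so the check
 * below converts an otherwise GOOD status into DID_ERROR.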
2331 */ 2332 if (!(resp_info & SNS_LEN_VALID) && 2333 (scsi_status == SAM_STAT_GOOD) && 2334 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) 2335 < cmnd->underflow)) { 2336 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 2337 "9027 FCP command x%x residual " 2338 "underrun converted to error " 2339 "Data: x%x x%x x%x\n", 2340 cmnd->cmnd[0], scsi_bufflen(cmnd), 2341 scsi_get_resid(cmnd), cmnd->underflow); 2342 host_status = DID_ERROR; 2343 } 2344 } else if (resp_info & RESID_OVER) { 2345 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 2346 "9028 FCP command x%x residual overrun error. " 2347 "Data: x%x x%x\n", cmnd->cmnd[0], 2348 scsi_bufflen(cmnd), scsi_get_resid(cmnd)); 2349 host_status = DID_ERROR; 2350 2351 /* 2352 * Check SLI validation that all the transfer was actually done 2353 * (fcpi_parm should be zero). Apply check only to reads. 2354 */ 2355 } else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) { 2356 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, 2357 "9029 FCP Read Check Error Data: " 2358 "x%x x%x x%x x%x x%x\n", 2359 be32_to_cpu(fcpcmd->fcpDl), 2360 be32_to_cpu(fcprsp->rspResId), 2361 fcpi_parm, cmnd->cmnd[0], scsi_status); 2362 switch (scsi_status) { 2363 case SAM_STAT_GOOD: 2364 case SAM_STAT_CHECK_CONDITION: 2365 /* Fabric dropped a data frame. Fail any successful 2366 * command in which we detected dropped frames. 2367 * A status of good or some check conditions could 2368 * be considered a successful command. 2369 */ 2370 host_status = DID_ERROR; 2371 break; 2372 } 2373 scsi_set_resid(cmnd, scsi_bufflen(cmnd)); 2374 } 2375 2376 out: 2377 cmnd->result = ScsiResult(host_status, scsi_status); 2378 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb); 2379 } 2380 2381 /** 2382 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine 2383 * @phba: The Hba for which this call is being executed. 2384 * @pIocbIn: The command IOCBQ for the scsi cmnd. 2385 * @pIocbOut: The response IOCBQ for the scsi cmnd. 2386 * 2387 * This routine assigns scsi command result by looking into response IOCB 2388 * status field appropriately. This routine handles QUEUE FULL condition as 2389 * well by ramping down device queue depth. 
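 *
 * Very roughly, the switch on lpfc_cmd->status below maps to:
 *
 *   IOSTAT_FCP_RSP_ERROR              -> lpfc_handle_fcp_err()
 *   IOSTAT_NPORT_BSY / FABRIC_BSY     -> DID_TRANSPORT_DISRUPTED + fabric event
 *   IOSTAT_LOCAL_REJECT (requeueable) -> DID_REQUEUE
 *   everything else                   -> DID_ERROR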
2390 **/ 2391 static void 2392 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, 2393 struct lpfc_iocbq *pIocbOut) 2394 { 2395 struct lpfc_scsi_buf *lpfc_cmd = 2396 (struct lpfc_scsi_buf *) pIocbIn->context1; 2397 struct lpfc_vport *vport = pIocbIn->vport; 2398 struct lpfc_rport_data *rdata = lpfc_cmd->rdata; 2399 struct lpfc_nodelist *pnode = rdata->pnode; 2400 struct scsi_cmnd *cmd; 2401 int result; 2402 struct scsi_device *tmp_sdev; 2403 int depth; 2404 unsigned long flags; 2405 struct lpfc_fast_path_event *fast_path_evt; 2406 struct Scsi_Host *shost; 2407 uint32_t queue_depth, scsi_id; 2408 2409 /* Sanity check on return of outstanding command */ 2410 if (!(lpfc_cmd->pCmd)) 2411 return; 2412 cmd = lpfc_cmd->pCmd; 2413 shost = cmd->device->host; 2414 2415 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4]; 2416 lpfc_cmd->status = pIocbOut->iocb.ulpStatus; 2417 /* pick up SLI4 exhange busy status from HBA */ 2418 lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY; 2419 2420 if (pnode && NLP_CHK_NODE_ACT(pnode)) 2421 atomic_dec(&pnode->cmd_pending); 2422 2423 if (lpfc_cmd->status) { 2424 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && 2425 (lpfc_cmd->result & IOERR_DRVR_MASK)) 2426 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 2427 else if (lpfc_cmd->status >= IOSTAT_CNT) 2428 lpfc_cmd->status = IOSTAT_DEFAULT; 2429 2430 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 2431 "9030 FCP cmd x%x failed <%d/%d> " 2432 "status: x%x result: x%x Data: x%x x%x\n", 2433 cmd->cmnd[0], 2434 cmd->device ? cmd->device->id : 0xffff, 2435 cmd->device ? cmd->device->lun : 0xffff, 2436 lpfc_cmd->status, lpfc_cmd->result, 2437 pIocbOut->iocb.ulpContext, 2438 lpfc_cmd->cur_iocbq.iocb.ulpIoTag); 2439 2440 switch (lpfc_cmd->status) { 2441 case IOSTAT_FCP_RSP_ERROR: 2442 /* Call FCP RSP handler to determine result */ 2443 lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut); 2444 break; 2445 case IOSTAT_NPORT_BSY: 2446 case IOSTAT_FABRIC_BSY: 2447 cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0); 2448 fast_path_evt = lpfc_alloc_fast_evt(phba); 2449 if (!fast_path_evt) 2450 break; 2451 fast_path_evt->un.fabric_evt.event_type = 2452 FC_REG_FABRIC_EVENT; 2453 fast_path_evt->un.fabric_evt.subcategory = 2454 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ? 2455 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY; 2456 if (pnode && NLP_CHK_NODE_ACT(pnode)) { 2457 memcpy(&fast_path_evt->un.fabric_evt.wwpn, 2458 &pnode->nlp_portname, 2459 sizeof(struct lpfc_name)); 2460 memcpy(&fast_path_evt->un.fabric_evt.wwnn, 2461 &pnode->nlp_nodename, 2462 sizeof(struct lpfc_name)); 2463 } 2464 fast_path_evt->vport = vport; 2465 fast_path_evt->work_evt.evt = 2466 LPFC_EVT_FASTPATH_MGMT_EVT; 2467 spin_lock_irqsave(&phba->hbalock, flags); 2468 list_add_tail(&fast_path_evt->work_evt.evt_listp, 2469 &phba->work_list); 2470 spin_unlock_irqrestore(&phba->hbalock, flags); 2471 lpfc_worker_wake_up(phba); 2472 break; 2473 case IOSTAT_LOCAL_REJECT: 2474 if (lpfc_cmd->result == IOERR_INVALID_RPI || 2475 lpfc_cmd->result == IOERR_NO_RESOURCES || 2476 lpfc_cmd->result == IOERR_ABORT_REQUESTED || 2477 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) { 2478 cmd->result = ScsiResult(DID_REQUEUE, 0); 2479 break; 2480 } 2481 2482 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED || 2483 lpfc_cmd->result == IOERR_TX_DMA_FAILED) && 2484 pIocbOut->iocb.unsli3.sli3_bg.bgstat) { 2485 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { 2486 /* 2487 * This is a response for a BG enabled 2488 * cmd. 
Parse BG error 2489 */ 2490 lpfc_parse_bg_err(phba, lpfc_cmd, 2491 pIocbOut); 2492 break; 2493 } else { 2494 lpfc_printf_vlog(vport, KERN_WARNING, 2495 LOG_BG, 2496 "9031 non-zero BGSTAT " 2497 "on unprotected cmd\n"); 2498 } 2499 } 2500 2501 /* else: fall through */ 2502 default: 2503 cmd->result = ScsiResult(DID_ERROR, 0); 2504 break; 2505 } 2506 2507 if (!pnode || !NLP_CHK_NODE_ACT(pnode) 2508 || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) 2509 cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 2510 SAM_STAT_BUSY); 2511 } else { 2512 cmd->result = ScsiResult(DID_OK, 0); 2513 } 2514 2515 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) { 2516 uint32_t *lp = (uint32_t *)cmd->sense_buffer; 2517 2518 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 2519 "0710 Iodone <%d/%d> cmd %p, error " 2520 "x%x SNS x%x x%x Data: x%x x%x\n", 2521 cmd->device->id, cmd->device->lun, cmd, 2522 cmd->result, *lp, *(lp + 3), cmd->retries, 2523 scsi_get_resid(cmd)); 2524 } 2525 2526 lpfc_update_stats(phba, lpfc_cmd); 2527 result = cmd->result; 2528 if (vport->cfg_max_scsicmpl_time && 2529 time_after(jiffies, lpfc_cmd->start_time + 2530 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { 2531 spin_lock_irqsave(shost->host_lock, flags); 2532 if (pnode && NLP_CHK_NODE_ACT(pnode)) { 2533 if (pnode->cmd_qdepth > 2534 atomic_read(&pnode->cmd_pending) && 2535 (atomic_read(&pnode->cmd_pending) > 2536 LPFC_MIN_TGT_QDEPTH) && 2537 ((cmd->cmnd[0] == READ_10) || 2538 (cmd->cmnd[0] == WRITE_10))) 2539 pnode->cmd_qdepth = 2540 atomic_read(&pnode->cmd_pending); 2541 2542 pnode->last_change_time = jiffies; 2543 } 2544 spin_unlock_irqrestore(shost->host_lock, flags); 2545 } else if (pnode && NLP_CHK_NODE_ACT(pnode)) { 2546 if ((pnode->cmd_qdepth < vport->cfg_tgt_queue_depth) && 2547 time_after(jiffies, pnode->last_change_time + 2548 msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) { 2549 spin_lock_irqsave(shost->host_lock, flags); 2550 depth = pnode->cmd_qdepth * LPFC_TGTQ_RAMPUP_PCENT 2551 / 100; 2552 depth = depth ? depth : 1; 2553 pnode->cmd_qdepth += depth; 2554 if (pnode->cmd_qdepth > vport->cfg_tgt_queue_depth) 2555 pnode->cmd_qdepth = vport->cfg_tgt_queue_depth; 2556 pnode->last_change_time = jiffies; 2557 spin_unlock_irqrestore(shost->host_lock, flags); 2558 } 2559 } 2560 2561 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 2562 2563 /* The sdev is not guaranteed to be valid post scsi_done upcall. */ 2564 queue_depth = cmd->device->queue_depth; 2565 scsi_id = cmd->device->id; 2566 cmd->scsi_done(cmd); 2567 2568 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 2569 /* 2570 * If there is a thread waiting for command completion 2571 * wake up the thread. 2572 */ 2573 spin_lock_irqsave(shost->host_lock, flags); 2574 lpfc_cmd->pCmd = NULL; 2575 if (lpfc_cmd->waitq) 2576 wake_up(lpfc_cmd->waitq); 2577 spin_unlock_irqrestore(shost->host_lock, flags); 2578 lpfc_release_scsi_buf(phba, lpfc_cmd); 2579 return; 2580 } 2581 2582 if (!result) 2583 lpfc_rampup_queue_depth(vport, queue_depth); 2584 2585 /* 2586 * Check for queue full. If the lun is reporting queue full, then 2587 * back off the lun queue depth to prevent target overloads. 
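 *
 * Sketch of the adjustment below (depth values are illustrative): on a
 * SAM_STAT_TASK_SET_FULL result every sdev on the same target is trimmed via
 *
 *     depth = scsi_track_queue_full(tmp_sdev, tmp_sdev->queue_depth - 1);
 *
 * and, when a new depth is actually set, a queue-depth change event is sent.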
2588 */ 2589 if (result == SAM_STAT_TASK_SET_FULL && pnode && 2590 NLP_CHK_NODE_ACT(pnode)) { 2591 shost_for_each_device(tmp_sdev, shost) { 2592 if (tmp_sdev->id != scsi_id) 2593 continue; 2594 depth = scsi_track_queue_full(tmp_sdev, 2595 tmp_sdev->queue_depth-1); 2596 if (depth <= 0) 2597 continue; 2598 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 2599 "0711 detected queue full - lun queue " 2600 "depth adjusted to %d.\n", depth); 2601 lpfc_send_sdev_queuedepth_change_event(phba, vport, 2602 pnode, 2603 tmp_sdev->lun, 2604 depth+1, depth); 2605 } 2606 } 2607 2608 /* 2609 * If there is a thread waiting for command completion 2610 * wake up the thread. 2611 */ 2612 spin_lock_irqsave(shost->host_lock, flags); 2613 lpfc_cmd->pCmd = NULL; 2614 if (lpfc_cmd->waitq) 2615 wake_up(lpfc_cmd->waitq); 2616 spin_unlock_irqrestore(shost->host_lock, flags); 2617 2618 lpfc_release_scsi_buf(phba, lpfc_cmd); 2619 } 2620 2621 /** 2622 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB 2623 * @data: A pointer to the immediate command data portion of the IOCB. 2624 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer. 2625 * 2626 * The routine copies the entire FCP command from @fcp_cmnd to @data while 2627 * byte swapping the data to big endian format for transmission on the wire. 2628 **/ 2629 static void 2630 lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd) 2631 { 2632 int i, j; 2633 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd); 2634 i += sizeof(uint32_t), j++) { 2635 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]); 2636 } 2637 } 2638 2639 /** 2640 * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit 2641 * @vport: The virtual port for which this call is being executed. 2642 * @lpfc_cmd: The scsi command which needs to send. 2643 * @pnode: Pointer to lpfc_nodelist. 2644 * 2645 * This routine initializes fcp_cmnd and iocb data structure from scsi command 2646 * to transfer for device with SLI3 interface spec. 2647 **/ 2648 static void 2649 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, 2650 struct lpfc_nodelist *pnode) 2651 { 2652 struct lpfc_hba *phba = vport->phba; 2653 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 2654 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 2655 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 2656 struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq); 2657 int datadir = scsi_cmnd->sc_data_direction; 2658 char tag[2]; 2659 2660 if (!pnode || !NLP_CHK_NODE_ACT(pnode)) 2661 return; 2662 2663 lpfc_cmd->fcp_rsp->rspSnsLen = 0; 2664 /* clear task management bits */ 2665 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0; 2666 2667 int_to_scsilun(lpfc_cmd->pCmd->device->lun, 2668 &lpfc_cmd->fcp_cmnd->fcp_lun); 2669 2670 memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16); 2671 2672 if (scsi_populate_tag_msg(scsi_cmnd, tag)) { 2673 switch (tag[0]) { 2674 case HEAD_OF_QUEUE_TAG: 2675 fcp_cmnd->fcpCntl1 = HEAD_OF_Q; 2676 break; 2677 case ORDERED_QUEUE_TAG: 2678 fcp_cmnd->fcpCntl1 = ORDERED_Q; 2679 break; 2680 default: 2681 fcp_cmnd->fcpCntl1 = SIMPLE_Q; 2682 break; 2683 } 2684 } else 2685 fcp_cmnd->fcpCntl1 = 0; 2686 2687 /* 2688 * There are three possibilities here - use scatter-gather segment, use 2689 * the single mapping, or neither. Start the lpfc command prep by 2690 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first 2691 * data bde entry. 
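 *
 * In short, the branches below select the IOCB command as follows:
 *
 *   scsi_sg_count() != 0, DMA_TO_DEVICE    -> CMD_FCP_IWRITE64_CR
 *   scsi_sg_count() != 0, DMA_FROM_DEVICE  -> CMD_FCP_IREAD64_CR
 *   no data transfer                       -> CMD_FCP_ICMND64_CR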
2692 */ 2693 if (scsi_sg_count(scsi_cmnd)) { 2694 if (datadir == DMA_TO_DEVICE) { 2695 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; 2696 if (phba->sli_rev < LPFC_SLI_REV4) { 2697 iocb_cmd->un.fcpi.fcpi_parm = 0; 2698 iocb_cmd->ulpPU = 0; 2699 } else 2700 iocb_cmd->ulpPU = PARM_READ_CHECK; 2701 fcp_cmnd->fcpCntl3 = WRITE_DATA; 2702 phba->fc4OutputRequests++; 2703 } else { 2704 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; 2705 iocb_cmd->ulpPU = PARM_READ_CHECK; 2706 fcp_cmnd->fcpCntl3 = READ_DATA; 2707 phba->fc4InputRequests++; 2708 } 2709 } else { 2710 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR; 2711 iocb_cmd->un.fcpi.fcpi_parm = 0; 2712 iocb_cmd->ulpPU = 0; 2713 fcp_cmnd->fcpCntl3 = 0; 2714 phba->fc4ControlRequests++; 2715 } 2716 if (phba->sli_rev == 3 && 2717 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) 2718 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd); 2719 /* 2720 * Finish initializing those IOCB fields that are independent 2721 * of the scsi_cmnd request_buffer 2722 */ 2723 piocbq->iocb.ulpContext = pnode->nlp_rpi; 2724 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) 2725 piocbq->iocb.ulpFCP2Rcvy = 1; 2726 else 2727 piocbq->iocb.ulpFCP2Rcvy = 0; 2728 2729 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f); 2730 piocbq->context1 = lpfc_cmd; 2731 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl; 2732 piocbq->iocb.ulpTimeout = lpfc_cmd->timeout; 2733 piocbq->vport = vport; 2734 } 2735 2736 /** 2737 * lpfc_scsi_prep_task_mgmt_cmnd - Convert SLI3 scsi TM cmd to FCP info unit 2738 * @vport: The virtual port for which this call is being executed. 2739 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. 2740 * @lun: Logical unit number. 2741 * @task_mgmt_cmd: SCSI task management command. 2742 * 2743 * This routine creates FCP information unit corresponding to @task_mgmt_cmd 2744 * for device with SLI-3 interface spec. 2745 * 2746 * Return codes: 2747 * 0 - Error 2748 * 1 - Success 2749 **/ 2750 static int 2751 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, 2752 struct lpfc_scsi_buf *lpfc_cmd, 2753 unsigned int lun, 2754 uint8_t task_mgmt_cmd) 2755 { 2756 struct lpfc_iocbq *piocbq; 2757 IOCB_t *piocb; 2758 struct fcp_cmnd *fcp_cmnd; 2759 struct lpfc_rport_data *rdata = lpfc_cmd->rdata; 2760 struct lpfc_nodelist *ndlp = rdata->pnode; 2761 2762 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || 2763 ndlp->nlp_state != NLP_STE_MAPPED_NODE) 2764 return 0; 2765 2766 piocbq = &(lpfc_cmd->cur_iocbq); 2767 piocbq->vport = vport; 2768 2769 piocb = &piocbq->iocb; 2770 2771 fcp_cmnd = lpfc_cmd->fcp_cmnd; 2772 /* Clear out any old data in the FCP command area */ 2773 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); 2774 int_to_scsilun(lun, &fcp_cmnd->fcp_lun); 2775 fcp_cmnd->fcpCntl2 = task_mgmt_cmd; 2776 if (vport->phba->sli_rev == 3 && 2777 !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED)) 2778 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd); 2779 piocb->ulpCommand = CMD_FCP_ICMND64_CR; 2780 piocb->ulpContext = ndlp->nlp_rpi; 2781 if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { 2782 piocb->ulpFCP2Rcvy = 1; 2783 } 2784 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f); 2785 2786 /* ulpTimeout is only one byte */ 2787 if (lpfc_cmd->timeout > 0xff) { 2788 /* 2789 * Do not timeout the command at the firmware level. 2790 * The driver will provide the timeout mechanism. 
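 *
 * Example (illustrative): the 60 second timeout used for TMFs in this file
 * fits in the one-byte field and is programmed directly below; a
 * hypothetical 300 second timeout would not, so ulpTimeout is left at 0
 * and only the driver-level timer applies.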
2791 */
2792 piocb->ulpTimeout = 0;
2793 } else
2794 piocb->ulpTimeout = lpfc_cmd->timeout;
2795
2796 if (vport->phba->sli_rev == LPFC_SLI_REV4)
2797 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
2798
2799 return 1;
2800 }
2801
2802 /**
2803 * lpfc_scsi_api_table_setup - Set up the scsi api function jump table
2804 * @phba: The hba struct for which this call is being executed.
2805 * @dev_grp: The HBA PCI-Device group number.
2806 *
2807 * This routine sets up the SCSI interface API function jump table in @phba
2808 * struct.
2809 * Returns: 0 - success, -ENODEV - failure.
2810 **/
2811 int
2812 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
2813 {
2814
2815 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
2816 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
2817
2818 switch (dev_grp) {
2819 case LPFC_PCI_DEV_LP:
2820 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
2821 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
2822 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
2823 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
2824 break;
2825 case LPFC_PCI_DEV_OC:
2826 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
2827 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
2828 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
2829 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
2830 break;
2831 default:
2832 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2833 "1418 Invalid HBA PCI-device group: 0x%x\n",
2834 dev_grp);
2835 return -ENODEV;
2836 break;
2837 }
2838 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
2839 phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
2840 return 0;
2841 }
2842
2843 /**
2844 * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
2845 * @phba: The Hba for which this call is being executed.
2846 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
2847 * @rspiocbq: Pointer to lpfc_iocbq data structure.
2848 *
2849 * This routine is the IOCB completion routine for the device reset and
2850 * target reset handlers. It releases the scsi buffer associated with lpfc_cmd.
2851 **/
2852 static void
2853 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
2854 struct lpfc_iocbq *cmdiocbq,
2855 struct lpfc_iocbq *rspiocbq)
2856 {
2857 struct lpfc_scsi_buf *lpfc_cmd =
2858 (struct lpfc_scsi_buf *) cmdiocbq->context1;
2859 if (lpfc_cmd)
2860 lpfc_release_scsi_buf(phba, lpfc_cmd);
2861 return;
2862 }
2863
2864 /**
2865 * lpfc_info - Info entry point of scsi_host_template data structure
2866 * @host: The scsi host for which this call is being executed.
2867 *
2868 * This routine provides module information about the hba.
2869 *
2870 * Return code:
2871 * Pointer to char - Success.
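 *
 * The returned string is assembled with snprintf() and looks roughly like
 * (values illustrative):
 *
 *   "<ModelDesc> on PCI bus 03 device 08 irq 17 port A Logical Link Speed: 10000 Mbps"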
2872 **/ 2873 const char * 2874 lpfc_info(struct Scsi_Host *host) 2875 { 2876 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata; 2877 struct lpfc_hba *phba = vport->phba; 2878 int len; 2879 static char lpfcinfobuf[384]; 2880 2881 memset(lpfcinfobuf,0,384); 2882 if (phba && phba->pcidev){ 2883 strncpy(lpfcinfobuf, phba->ModelDesc, 256); 2884 len = strlen(lpfcinfobuf); 2885 snprintf(lpfcinfobuf + len, 2886 384-len, 2887 " on PCI bus %02x device %02x irq %d", 2888 phba->pcidev->bus->number, 2889 phba->pcidev->devfn, 2890 phba->pcidev->irq); 2891 len = strlen(lpfcinfobuf); 2892 if (phba->Port[0]) { 2893 snprintf(lpfcinfobuf + len, 2894 384-len, 2895 " port %s", 2896 phba->Port); 2897 } 2898 len = strlen(lpfcinfobuf); 2899 if (phba->sli4_hba.link_state.logical_speed) { 2900 snprintf(lpfcinfobuf + len, 2901 384-len, 2902 " Logical Link Speed: %d Mbps", 2903 phba->sli4_hba.link_state.logical_speed * 10); 2904 } 2905 } 2906 return lpfcinfobuf; 2907 } 2908 2909 /** 2910 * lpfc_poll_rearm_time - Routine to modify fcp_poll timer of hba 2911 * @phba: The Hba for which this call is being executed. 2912 * 2913 * This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo. 2914 * The default value of cfg_poll_tmo is 10 milliseconds. 2915 **/ 2916 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba) 2917 { 2918 unsigned long poll_tmo_expires = 2919 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo)); 2920 2921 if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt) 2922 mod_timer(&phba->fcp_poll_timer, 2923 poll_tmo_expires); 2924 } 2925 2926 /** 2927 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA 2928 * @phba: The Hba for which this call is being executed. 2929 * 2930 * This routine starts the fcp_poll_timer of @phba. 2931 **/ 2932 void lpfc_poll_start_timer(struct lpfc_hba * phba) 2933 { 2934 lpfc_poll_rearm_timer(phba); 2935 } 2936 2937 /** 2938 * lpfc_poll_timeout - Restart polling timer 2939 * @ptr: Map to lpfc_hba data structure pointer. 2940 * 2941 * This routine restarts fcp_poll timer, when FCP ring polling is enable 2942 * and FCP Ring interrupt is disable. 2943 **/ 2944 2945 void lpfc_poll_timeout(unsigned long ptr) 2946 { 2947 struct lpfc_hba *phba = (struct lpfc_hba *) ptr; 2948 2949 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 2950 lpfc_sli_handle_fast_ring_event(phba, 2951 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); 2952 2953 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 2954 lpfc_poll_rearm_timer(phba); 2955 } 2956 } 2957 2958 /** 2959 * lpfc_queuecommand - scsi_host_template queuecommand entry point 2960 * @cmnd: Pointer to scsi_cmnd data structure. 2961 * @done: Pointer to done routine. 2962 * 2963 * Driver registers this routine to scsi midlayer to submit a @cmd to process. 2964 * This routine prepares an IOCB from scsi command and provides to firmware. 2965 * The @done callback is invoked after driver finished processing the command. 2966 * 2967 * Return value : 2968 * 0 - Success 2969 * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily. 
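 *
 * Note that the locked variant defined below is wrapped by
 * DEF_SCSI_QCMD(lpfc_queuecommand) further down, so the midlayer actually
 * invokes the generated lpfc_queuecommand(), which takes the host lock and
 * then calls lpfc_queuecommand_lck() with the command's done callback.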
2970 **/ 2971 static int 2972 lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) 2973 { 2974 struct Scsi_Host *shost = cmnd->device->host; 2975 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2976 struct lpfc_hba *phba = vport->phba; 2977 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 2978 struct lpfc_nodelist *ndlp; 2979 struct lpfc_scsi_buf *lpfc_cmd; 2980 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 2981 int err; 2982 2983 err = fc_remote_port_chkready(rport); 2984 if (err) { 2985 cmnd->result = err; 2986 goto out_fail_command; 2987 } 2988 ndlp = rdata->pnode; 2989 2990 if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 2991 scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { 2992 2993 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 2994 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x" 2995 " op:%02x str=%s without registering for" 2996 " BlockGuard - Rejecting command\n", 2997 cmnd->cmnd[0], scsi_get_prot_op(cmnd), 2998 dif_op_str[scsi_get_prot_op(cmnd)]); 2999 goto out_fail_command; 3000 } 3001 3002 /* 3003 * Catch race where our node has transitioned, but the 3004 * transport is still transitioning. 3005 */ 3006 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 3007 cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0); 3008 goto out_fail_command; 3009 } 3010 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) 3011 goto out_host_busy; 3012 3013 lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp); 3014 if (lpfc_cmd == NULL) { 3015 lpfc_rampdown_queue_depth(phba); 3016 3017 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 3018 "0707 driver's buffer pool is empty, " 3019 "IO busied\n"); 3020 goto out_host_busy; 3021 } 3022 3023 /* 3024 * Store the midlayer's command structure for the completion phase 3025 * and complete the command initialization. 
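 *
 * The bookkeeping below links the two command structures both ways, e.g.
 *
 *     lpfc_cmd->pCmd      = cmnd;
 *     cmnd->host_scribble = (unsigned char *)lpfc_cmd;
 *
 * which is what lets lpfc_abort_handler() recover the lpfc_scsi_buf from
 * cmnd->host_scribble later on.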
3026 */ 3027 lpfc_cmd->pCmd = cmnd; 3028 lpfc_cmd->rdata = rdata; 3029 lpfc_cmd->timeout = 0; 3030 lpfc_cmd->start_time = jiffies; 3031 cmnd->host_scribble = (unsigned char *)lpfc_cmd; 3032 cmnd->scsi_done = done; 3033 3034 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { 3035 if (vport->phba->cfg_enable_bg) { 3036 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 3037 "9033 BLKGRD: rcvd protected cmd:%02x op:%02x " 3038 "str=%s\n", 3039 cmnd->cmnd[0], scsi_get_prot_op(cmnd), 3040 dif_op_str[scsi_get_prot_op(cmnd)]); 3041 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 3042 "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x " 3043 "%02x %02x %02x %02x %02x\n", 3044 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2], 3045 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5], 3046 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8], 3047 cmnd->cmnd[9]); 3048 if (cmnd->cmnd[0] == READ_10) 3049 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 3050 "9035 BLKGRD: READ @ sector %llu, " 3051 "count %u\n", 3052 (unsigned long long)scsi_get_lba(cmnd), 3053 blk_rq_sectors(cmnd->request)); 3054 else if (cmnd->cmnd[0] == WRITE_10) 3055 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 3056 "9036 BLKGRD: WRITE @ sector %llu, " 3057 "count %u cmd=%p\n", 3058 (unsigned long long)scsi_get_lba(cmnd), 3059 blk_rq_sectors(cmnd->request), 3060 cmnd); 3061 } 3062 3063 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 3064 } else { 3065 if (vport->phba->cfg_enable_bg) { 3066 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 3067 "9038 BLKGRD: rcvd unprotected cmd:" 3068 "%02x op:%02x str=%s\n", 3069 cmnd->cmnd[0], scsi_get_prot_op(cmnd), 3070 dif_op_str[scsi_get_prot_op(cmnd)]); 3071 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 3072 "9039 BLKGRD: CDB: %02x %02x %02x " 3073 "%02x %02x %02x %02x %02x %02x %02x\n", 3074 cmnd->cmnd[0], cmnd->cmnd[1], 3075 cmnd->cmnd[2], cmnd->cmnd[3], 3076 cmnd->cmnd[4], cmnd->cmnd[5], 3077 cmnd->cmnd[6], cmnd->cmnd[7], 3078 cmnd->cmnd[8], cmnd->cmnd[9]); 3079 if (cmnd->cmnd[0] == READ_10) 3080 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 3081 "9040 dbg: READ @ sector %llu, " 3082 "count %u\n", 3083 (unsigned long long)scsi_get_lba(cmnd), 3084 blk_rq_sectors(cmnd->request)); 3085 else if (cmnd->cmnd[0] == WRITE_10) 3086 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 3087 "9041 dbg: WRITE @ sector %llu, " 3088 "count %u cmd=%p\n", 3089 (unsigned long long)scsi_get_lba(cmnd), 3090 blk_rq_sectors(cmnd->request), cmnd); 3091 else 3092 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 3093 "9042 dbg: parser not implemented\n"); 3094 } 3095 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); 3096 } 3097 3098 if (err) 3099 goto out_host_busy_free_buf; 3100 3101 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); 3102 3103 atomic_inc(&ndlp->cmd_pending); 3104 err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, 3105 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); 3106 if (err) { 3107 atomic_dec(&ndlp->cmd_pending); 3108 goto out_host_busy_free_buf; 3109 } 3110 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 3111 spin_unlock(shost->host_lock); 3112 lpfc_sli_handle_fast_ring_event(phba, 3113 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); 3114 3115 spin_lock(shost->host_lock); 3116 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 3117 lpfc_poll_rearm_timer(phba); 3118 } 3119 3120 return 0; 3121 3122 out_host_busy_free_buf: 3123 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); 3124 lpfc_release_scsi_buf(phba, lpfc_cmd); 3125 out_host_busy: 3126 return SCSI_MLQUEUE_HOST_BUSY; 3127 3128 out_fail_command: 3129 done(cmnd); 3130 return 0; 3131 } 3132 3133 static 
DEF_SCSI_QCMD(lpfc_queuecommand)
3134
3135 /**
3136 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
3137 * @cmnd: Pointer to scsi_cmnd data structure.
3138 *
3139 * This routine aborts @cmnd pending in the base driver.
3140 *
3141 * Return code :
3142 * 0x2003 - Error
3143 * 0x2002 - Success
3144 **/
3145 static int
3146 lpfc_abort_handler(struct scsi_cmnd *cmnd)
3147 {
3148 struct Scsi_Host *shost = cmnd->device->host;
3149 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3150 struct lpfc_hba *phba = vport->phba;
3151 struct lpfc_iocbq *iocb;
3152 struct lpfc_iocbq *abtsiocb;
3153 struct lpfc_scsi_buf *lpfc_cmd;
3154 IOCB_t *cmd, *icmd;
3155 int ret = SUCCESS;
3156 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
3157
3158 ret = fc_block_scsi_eh(cmnd);
3159 if (ret)
3160 return ret;
3161 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
3162 if (!lpfc_cmd) {
3163 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3164 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
3165 "x%x ID %d "
3166 "LUN %d snum %#lx\n", ret, cmnd->device->id,
3167 cmnd->device->lun, cmnd->serial_number);
3168 return SUCCESS;
3169 }
3170
3171 /*
3172 * If the pCmd field of the corresponding lpfc_scsi_buf structure
3173 * points to a different SCSI command, then the driver has
3174 * already completed this command, but the midlayer did not
3175 * see the completion before the eh fired. Just return
3176 * SUCCESS.
3177 */
3178 iocb = &lpfc_cmd->cur_iocbq;
3179 if (lpfc_cmd->pCmd != cmnd)
3180 goto out;
3181
3182 BUG_ON(iocb->context1 != lpfc_cmd);
3183
3184 abtsiocb = lpfc_sli_get_iocbq(phba);
3185 if (abtsiocb == NULL) {
3186 ret = FAILED;
3187 goto out;
3188 }
3189
3190 /*
3191 * The scsi command cannot be in the txq, and it is in flight because
3192 * pCmd is still pointing at the SCSI command we have to abort. There
3193 * is no need to search the txcmplq. Just send an abort to the FW.
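 *
 * Sketch of the abort request built below: the ABTS IOCB copies the
 * context of the original command, e.g.
 *
 *     icmd->un.acxri.abortContextTag = cmd->ulpContext;
 *     icmd->un.acxri.abortIoTag      = cmd->ulpIoTag;    (sli4_xritag on SLI-4)
 *
 * and uses CMD_ABORT_XRI_CN while the link is up, CMD_CLOSE_XRI_CN otherwise.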
3194 */ 3195 3196 cmd = &iocb->iocb; 3197 icmd = &abtsiocb->iocb; 3198 icmd->un.acxri.abortType = ABORT_TYPE_ABTS; 3199 icmd->un.acxri.abortContextTag = cmd->ulpContext; 3200 if (phba->sli_rev == LPFC_SLI_REV4) 3201 icmd->un.acxri.abortIoTag = iocb->sli4_xritag; 3202 else 3203 icmd->un.acxri.abortIoTag = cmd->ulpIoTag; 3204 3205 icmd->ulpLe = 1; 3206 icmd->ulpClass = cmd->ulpClass; 3207 3208 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 3209 abtsiocb->fcp_wqidx = iocb->fcp_wqidx; 3210 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX; 3211 3212 if (lpfc_is_link_up(phba)) 3213 icmd->ulpCommand = CMD_ABORT_XRI_CN; 3214 else 3215 icmd->ulpCommand = CMD_CLOSE_XRI_CN; 3216 3217 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 3218 abtsiocb->vport = vport; 3219 if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) == 3220 IOCB_ERROR) { 3221 lpfc_sli_release_iocbq(phba, abtsiocb); 3222 ret = FAILED; 3223 goto out; 3224 } 3225 3226 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 3227 lpfc_sli_handle_fast_ring_event(phba, 3228 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); 3229 3230 lpfc_cmd->waitq = &waitq; 3231 /* Wait for abort to complete */ 3232 wait_event_timeout(waitq, 3233 (lpfc_cmd->pCmd != cmnd), 3234 (2*vport->cfg_devloss_tmo*HZ)); 3235 3236 spin_lock_irq(shost->host_lock); 3237 lpfc_cmd->waitq = NULL; 3238 spin_unlock_irq(shost->host_lock); 3239 3240 if (lpfc_cmd->pCmd == cmnd) { 3241 ret = FAILED; 3242 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3243 "0748 abort handler timed out waiting " 3244 "for abort to complete: ret %#x, ID %d, " 3245 "LUN %d, snum %#lx\n", 3246 ret, cmnd->device->id, cmnd->device->lun, 3247 cmnd->serial_number); 3248 } 3249 3250 out: 3251 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 3252 "0749 SCSI Layer I/O Abort Request Status x%x ID %d " 3253 "LUN %d snum %#lx\n", ret, cmnd->device->id, 3254 cmnd->device->lun, cmnd->serial_number); 3255 return ret; 3256 } 3257 3258 static char * 3259 lpfc_taskmgmt_name(uint8_t task_mgmt_cmd) 3260 { 3261 switch (task_mgmt_cmd) { 3262 case FCP_ABORT_TASK_SET: 3263 return "ABORT_TASK_SET"; 3264 case FCP_CLEAR_TASK_SET: 3265 return "FCP_CLEAR_TASK_SET"; 3266 case FCP_BUS_RESET: 3267 return "FCP_BUS_RESET"; 3268 case FCP_LUN_RESET: 3269 return "FCP_LUN_RESET"; 3270 case FCP_TARGET_RESET: 3271 return "FCP_TARGET_RESET"; 3272 case FCP_CLEAR_ACA: 3273 return "FCP_CLEAR_ACA"; 3274 case FCP_TERMINATE_TASK: 3275 return "FCP_TERMINATE_TASK"; 3276 default: 3277 return "unknown"; 3278 } 3279 } 3280 3281 /** 3282 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler 3283 * @vport: The virtual port for which this call is being executed. 3284 * @rdata: Pointer to remote port local data 3285 * @tgt_id: Target ID of remote device. 3286 * @lun_id: Lun number for the TMF 3287 * @task_mgmt_cmd: type of TMF to send 3288 * 3289 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to 3290 * a remote port. 3291 * 3292 * Return Code: 3293 * 0x2003 - Error 3294 * 0x2002 - Success. 
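 *
 * Typical use, as in the reset handlers later in this file:
 *
 *     status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
 *                                 FCP_LUN_RESET);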
3295 **/ 3296 static int 3297 lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, 3298 unsigned tgt_id, unsigned int lun_id, 3299 uint8_t task_mgmt_cmd) 3300 { 3301 struct lpfc_hba *phba = vport->phba; 3302 struct lpfc_scsi_buf *lpfc_cmd; 3303 struct lpfc_iocbq *iocbq; 3304 struct lpfc_iocbq *iocbqrsp; 3305 struct lpfc_nodelist *pnode = rdata->pnode; 3306 int ret; 3307 int status; 3308 3309 if (!pnode || !NLP_CHK_NODE_ACT(pnode)) 3310 return FAILED; 3311 3312 lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode); 3313 if (lpfc_cmd == NULL) 3314 return FAILED; 3315 lpfc_cmd->timeout = 60; 3316 lpfc_cmd->rdata = rdata; 3317 3318 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id, 3319 task_mgmt_cmd); 3320 if (!status) { 3321 lpfc_release_scsi_buf(phba, lpfc_cmd); 3322 return FAILED; 3323 } 3324 3325 iocbq = &lpfc_cmd->cur_iocbq; 3326 iocbqrsp = lpfc_sli_get_iocbq(phba); 3327 if (iocbqrsp == NULL) { 3328 lpfc_release_scsi_buf(phba, lpfc_cmd); 3329 return FAILED; 3330 } 3331 3332 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 3333 "0702 Issue %s to TGT %d LUN %d " 3334 "rpi x%x nlp_flag x%x\n", 3335 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id, 3336 pnode->nlp_rpi, pnode->nlp_flag); 3337 3338 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, 3339 iocbq, iocbqrsp, lpfc_cmd->timeout); 3340 if (status != IOCB_SUCCESS) { 3341 if (status == IOCB_TIMEDOUT) { 3342 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; 3343 ret = TIMEOUT_ERROR; 3344 } else 3345 ret = FAILED; 3346 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 3347 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3348 "0727 TMF %s to TGT %d LUN %d failed (%d, %d)\n", 3349 lpfc_taskmgmt_name(task_mgmt_cmd), 3350 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus, 3351 iocbqrsp->iocb.un.ulpWord[4]); 3352 } else if (status == IOCB_BUSY) 3353 ret = FAILED; 3354 else 3355 ret = SUCCESS; 3356 3357 lpfc_sli_release_iocbq(phba, iocbqrsp); 3358 3359 if (ret != TIMEOUT_ERROR) 3360 lpfc_release_scsi_buf(phba, lpfc_cmd); 3361 3362 return ret; 3363 } 3364 3365 /** 3366 * lpfc_chk_tgt_mapped - 3367 * @vport: The virtual port to check on 3368 * @cmnd: Pointer to scsi_cmnd data structure. 3369 * 3370 * This routine delays until the scsi target (aka rport) for the 3371 * command exists (is present and logged in) or we declare it non-existent. 3372 * 3373 * Return code : 3374 * 0x2003 - Error 3375 * 0x2002 - Success 3376 **/ 3377 static int 3378 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd) 3379 { 3380 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 3381 struct lpfc_nodelist *pnode; 3382 unsigned long later; 3383 3384 if (!rdata) { 3385 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 3386 "0797 Tgt Map rport failure: rdata x%p\n", rdata); 3387 return FAILED; 3388 } 3389 pnode = rdata->pnode; 3390 /* 3391 * If target is not in a MAPPED state, delay until 3392 * target is rediscovered or devloss timeout expires. 
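 *
 * The loop below polls in 500 ms steps and is bounded by
 * 2 * vport->cfg_devloss_tmo seconds; with an illustrative devloss_tmo of
 * 30 seconds it gives up after roughly a minute.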
3393 */ 3394 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; 3395 while (time_after(later, jiffies)) { 3396 if (!pnode || !NLP_CHK_NODE_ACT(pnode)) 3397 return FAILED; 3398 if (pnode->nlp_state == NLP_STE_MAPPED_NODE) 3399 return SUCCESS; 3400 schedule_timeout_uninterruptible(msecs_to_jiffies(500)); 3401 rdata = cmnd->device->hostdata; 3402 if (!rdata) 3403 return FAILED; 3404 pnode = rdata->pnode; 3405 } 3406 if (!pnode || !NLP_CHK_NODE_ACT(pnode) || 3407 (pnode->nlp_state != NLP_STE_MAPPED_NODE)) 3408 return FAILED; 3409 return SUCCESS; 3410 } 3411 3412 /** 3413 * lpfc_reset_flush_io_context - 3414 * @vport: The virtual port (scsi_host) for the flush context 3415 * @tgt_id: If aborting by Target contect - specifies the target id 3416 * @lun_id: If aborting by Lun context - specifies the lun id 3417 * @context: specifies the context level to flush at. 3418 * 3419 * After a reset condition via TMF, we need to flush orphaned i/o 3420 * contexts from the adapter. This routine aborts any contexts 3421 * outstanding, then waits for their completions. The wait is 3422 * bounded by devloss_tmo though. 3423 * 3424 * Return code : 3425 * 0x2003 - Error 3426 * 0x2002 - Success 3427 **/ 3428 static int 3429 lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id, 3430 uint64_t lun_id, lpfc_ctx_cmd context) 3431 { 3432 struct lpfc_hba *phba = vport->phba; 3433 unsigned long later; 3434 int cnt; 3435 3436 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); 3437 if (cnt) 3438 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], 3439 tgt_id, lun_id, context); 3440 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; 3441 while (time_after(later, jiffies) && cnt) { 3442 schedule_timeout_uninterruptible(msecs_to_jiffies(20)); 3443 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); 3444 } 3445 if (cnt) { 3446 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3447 "0724 I/O flush failure for context %s : cnt x%x\n", 3448 ((context == LPFC_CTX_LUN) ? "LUN" : 3449 ((context == LPFC_CTX_TGT) ? "TGT" : 3450 ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))), 3451 cnt); 3452 return FAILED; 3453 } 3454 return SUCCESS; 3455 } 3456 3457 /** 3458 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point 3459 * @cmnd: Pointer to scsi_cmnd data structure. 3460 * 3461 * This routine does a device reset by sending a LUN_RESET task management 3462 * command. 
3463 * 3464 * Return code : 3465 * 0x2003 - Error 3466 * 0x2002 - Success 3467 **/ 3468 static int 3469 lpfc_device_reset_handler(struct scsi_cmnd *cmnd) 3470 { 3471 struct Scsi_Host *shost = cmnd->device->host; 3472 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3473 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 3474 struct lpfc_nodelist *pnode; 3475 unsigned tgt_id = cmnd->device->id; 3476 unsigned int lun_id = cmnd->device->lun; 3477 struct lpfc_scsi_event_header scsi_event; 3478 int status; 3479 3480 if (!rdata) { 3481 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3482 "0798 Device Reset rport failure: rdata x%p\n", rdata); 3483 return FAILED; 3484 } 3485 pnode = rdata->pnode; 3486 status = fc_block_scsi_eh(cmnd); 3487 if (status) 3488 return status; 3489 3490 status = lpfc_chk_tgt_mapped(vport, cmnd); 3491 if (status == FAILED) { 3492 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3493 "0721 Device Reset rport failure: rdata x%p\n", rdata); 3494 return FAILED; 3495 } 3496 3497 scsi_event.event_type = FC_REG_SCSI_EVENT; 3498 scsi_event.subcategory = LPFC_EVENT_LUNRESET; 3499 scsi_event.lun = lun_id; 3500 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name)); 3501 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name)); 3502 3503 fc_host_post_vendor_event(shost, fc_get_event_number(), 3504 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 3505 3506 status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id, 3507 FCP_LUN_RESET); 3508 3509 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3510 "0713 SCSI layer issued Device Reset (%d, %d) " 3511 "return x%x\n", tgt_id, lun_id, status); 3512 3513 /* 3514 * We have to clean up i/o as : they may be orphaned by the TMF; 3515 * or if the TMF failed, they may be in an indeterminate state. 3516 * So, continue on. 3517 * We will report success if all the i/o aborts successfully. 3518 */ 3519 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, 3520 LPFC_CTX_LUN); 3521 return status; 3522 } 3523 3524 /** 3525 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point 3526 * @cmnd: Pointer to scsi_cmnd data structure. 3527 * 3528 * This routine does a target reset by sending a TARGET_RESET task management 3529 * command. 
/**
 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a target reset by sending a TARGET_RESET task management
 * command.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode;
	unsigned tgt_id = cmnd->device->id;
	unsigned int lun_id = cmnd->device->lun;
	struct lpfc_scsi_event_header scsi_event;
	int status;

	if (!rdata) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0799 Target Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	status = fc_block_scsi_eh(cmnd);
	if (status)
		return status;

	status = lpfc_chk_tgt_mapped(vport, cmnd);
	if (status == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0722 Target Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
					FCP_TARGET_RESET);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0723 SCSI layer issued Target Reset (%d, %d) "
			 "return x%x\n", tgt_id, lun_id, status);

	/*
	 * We have to clean up the i/o regardless: it may have been orphaned
	 * by the TMF, or, if the TMF failed, it may be left in an
	 * indeterminate state.  So, continue on.
	 * We will report success only if all of the i/o aborts successfully.
	 */
	status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
					     LPFC_CTX_TGT);
	return status;
}

/**
 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a target reset on every target on @cmnd->device->host.
 * This emulates Parallel SCSI Bus Reset Semantics.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_nodelist *ndlp = NULL;
	struct lpfc_scsi_event_header scsi_event;
	int match;
	int ret = SUCCESS, status, i;

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	ret = fc_block_scsi_eh(cmnd);
	if (ret)
		return ret;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for a mapped node by target ID */
		match = 0;
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    ndlp->nlp_sid == i &&
			    ndlp->rport) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;

		status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
					i, 0, FCP_TARGET_RESET);

		if (status != SUCCESS) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				"0700 Bus Reset on target %d failed\n",
				i);
			ret = FAILED;
		}
	}
	/*
	 * We have to clean up the i/o regardless: it may have been orphaned
	 * by the TMFs above, or, if any of the TMFs failed, it may be left
	 * in an indeterminate state.
	 * We will report success only if all of the i/o aborts successfully.
	 */
	status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
	if (status != SUCCESS)
		ret = FAILED;

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
	return ret;
}

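/*
 * lpfc_slave_alloc() below grows the global scsi_buf pool by roughly
 * cfg_lun_queue_depth + 2 buffers per device, while never letting the pool
 * exceed cfg_hba_queue_depth less the LPFC_DISC_IOCB_BUFF_COUNT exchanges
 * reserved for discovery.  The uncompiled sketch below restates that clamp
 * on its own; the helper name is hypothetical, but the fields and macro are
 * the ones used in the real routine.
 */
#if 0
static uint32_t
lpfc_bufs_to_alloc_sketch(struct lpfc_vport *vport, uint32_t pool_total)
{
	struct lpfc_hba *phba = vport->phba;
	uint32_t want = vport->cfg_lun_queue_depth + 2;
	uint32_t limit = phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT;

	if (pool_total >= limit)
		return 0;		/* already at the cap */
	if (pool_total + want > limit)
		want = phba->cfg_hba_queue_depth - pool_total;
	return want;
}
#endif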
/**
 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine adds lun_queue_depth + 2 scsi_bufs to this host's globally
 * available list of scsi buffers. It also makes sure no more scsi buffers
 * are allocated than the HBA limit conveyed to the midlayer. This list of
 * scsi buffers exists for the lifetime of the driver.
 *
 * Return codes:
 *   non-0 - Error
 *   0 - Success
 **/
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0;
	uint32_t num_to_alloc = 0;
	int num_allocated = 0;
	uint32_t sdev_cnt;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = rport->dd_data;
	sdev_cnt = atomic_inc_return(&phba->sdev_cnt);

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers. Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure. The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra. This list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = vport->cfg_lun_queue_depth + 2;

	/* If the allocated buffers are enough, do nothing */
	if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
		return 0;

	/* Always keep some exchanges available to complete discovery */
	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0704 At limitation of %d preallocated "
				 "command buffers\n", total);
		return 0;
	/* Always keep some exchanges available to complete discovery */
	} else if (total + num_to_alloc >
		phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0705 Allocation request of %d "
				 "command buffers will exceed max of %d. "
				 "Reducing allocation request to %d.\n",
				 num_to_alloc, phba->cfg_hba_queue_depth,
				 (phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}
	num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
	if (num_to_alloc != num_allocated) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0708 Allocation request of %d "
				 "command buffers did not succeed. "
				 "Allocated %d buffers.\n",
				 num_to_alloc, num_allocated);
	}
	if (num_allocated > 0)
		phba->total_scsi_bufs += num_allocated;
	return 0;
}

/**
 * lpfc_slave_configure - scsi_host_template slave_configure entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine configures the following items:
 * - Tag command queuing support for @sdev if supported.
 * - Enable SLI polling for the fcp ring if the ENABLE_FCP_RING_POLLING flag
 *   is set.
 *
 * Return codes:
 *   0 - Success
 **/
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
	else
		scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}

/**
 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
 * @sdev: Pointer to scsi_device.
 *
 * This routine sets the @sdev hostdata field to null.
 **/
static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;

	atomic_dec(&phba->sdev_cnt);
	sdev->hostdata = NULL;
	return;
}


struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
	.vendor_id		= LPFC_NL_VENDOR_ID,
	.change_queue_depth	= lpfc_change_queue_depth,
};

struct scsi_host_template lpfc_vport_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_vport_attrs,
	.max_sectors		= 0xFFFF,
	.change_queue_depth	= lpfc_change_queue_depth,
};
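/*
 * Neither template is consumed in this file.  Port bring-up code
 * (lpfc_create_port() in lpfc_init.c, in this driver's layout) is expected
 * to hand them to scsi_host_alloc(), roughly as in the uncompiled sketch
 * below; the helper name is hypothetical, and the hostdata sizing simply
 * mirrors the (struct lpfc_vport *) casts used throughout this file.
 */
#if 0
static struct Scsi_Host *
lpfc_alloc_shost_sketch(int physical_port)
{
	struct scsi_host_template *sht = physical_port ?
				&lpfc_template : &lpfc_vport_template;

	/* Reserve hostdata room for the lpfc_vport behind shost->hostdata. */
	return scsi_host_alloc(sht, sizeof(struct lpfc_vport));
}
#endif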