/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

/*
 * This function is called with no lock held when there is a resource
 * error in driver or in firmware.
 */
void
lpfc_adjust_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	if ((phba->pport->work_port_events &
		WORKER_RAMP_DOWN_QUEUE) == 0) {
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	}
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	spin_lock_irqsave(&phba->hbalock, flags);
	if (phba->work_wait)
		wake_up(phba->work_wait);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	return;
}

/*
 * This function is called with no lock held when there is a successful
 * SCSI command completion.
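 * It queues a WORKER_RAMP_UP_QUEUE event, rate limited by
 * QUEUE_RAMP_UP_INTERVAL, so the worker thread can grow the LUN queue depth.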
 */
static inline void
lpfc_rampup_queue_depth(struct lpfc_vport *vport,
			struct scsi_device *sdev)
{
	unsigned long flags;
	struct lpfc_hba *phba = vport->phba;
	atomic_inc(&phba->num_cmd_success);

	if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
		return;
	spin_lock_irqsave(&phba->hbalock, flags);
	if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
	    ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL) > jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}
	phba->last_ramp_up_time = jiffies;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	if ((phba->pport->work_port_events &
		WORKER_RAMP_UP_QUEUE) == 0) {
		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
	}
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	spin_lock_irqsave(&phba->hbalock, flags);
	if (phba->work_wait)
		wake_up(phba->work_wait);
	spin_unlock_irqrestore(&phba->hbalock, flags);
}

void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
				if (sdev->ordered_tags)
					scsi_adjust_queue_depth(sdev,
							MSG_ORDERED_TAG,
							new_queue_depth);
				else
					scsi_adjust_queue_depth(sdev,
							MSG_SIMPLE_TAG,
							new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

void
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				if (sdev->ordered_tags)
					scsi_adjust_queue_depth(sdev,
							MSG_ORDERED_TAG,
							sdev->queue_depth+1);
				else
					scsi_adjust_queue_depth(sdev,
							MSG_SIMPLE_TAG,
							sdev->queue_depth+1);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/*
 * This routine allocates a scsi buffer, which contains all the necessary
 * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
 * contains information to build the IOCB.  The DMAable region contains
 * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
 * and the BPL BDE is setup in the IOCB.
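 * The DMA buffer is laid out as the FCP CMND, then the FCP RSP, then the
 * BPL entries sized to cover cfg_sg_seg_cnt scatter-gather segments.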
 */
static struct lpfc_scsi_buf *
lpfc_new_scsi_buf(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys;
	uint16_t iotag;

	psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
	if (!psb)
		return NULL;

	/*
	 * Get memory from the pci pool to map the virt space to pci bus space
	 * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
	 * struct fcp_rsp and the number of bde's necessary to support the
	 * sg_tablesize.
	 */
	psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
				   &psb->dma_handle);
	if (!psb->data) {
		kfree(psb);
		return NULL;
	}

	/* Initialize virtual ptrs to dma_buf region. */
	memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

	/* Allocate iotag for psb->cur_iocbq. */
	iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
	if (iotag == 0) {
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
			      psb->data, psb->dma_handle);
		kfree(psb);
		return NULL;
	}
	psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

	psb->fcp_cmnd = psb->data;
	psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
	psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

	/* Initialize local short-hand pointers. */
	bpl = psb->fcp_bpl;
	pdma_phys = psb->dma_handle;

	/*
	 * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
	 * list bdes.  Initialize the first two and leave the rest for
	 * queuecommand.
	 */
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
	bpl->tus.f.bdeFlags = BUFF_USE_CMND;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	/* Setup the physical region for the FCP RSP */
	pdma_phys += sizeof (struct fcp_cmnd);
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
	bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	/*
	 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
	 * initialize it with all known data now.
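	 * The BDL in the IOCB points at the BPL just built; its size covers
	 * only the FCP CMND and FCP RSP entries here and is grown by
	 * lpfc_scsi_prep_dma_buf() when the data BDEs are added.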
	 */
	pdma_phys += (sizeof (struct fcp_rsp));
	iocb = &psb->cur_iocbq.iocb;
	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
	iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
	iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
	iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
	iocb->ulpBdeCount = 1;
	iocb->ulpClass = CLASS3;

	return psb;
}

static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba * phba)
{
	struct lpfc_scsi_buf * lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd) {
		lpfc_cmd->seg_cnt = 0;
		lpfc_cmd->nonsg_phys = 0;
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return lpfc_cmd;
}

static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	psb->pCmd = NULL;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}

static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t i, num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			printk(KERN_ERR "%s: Too many sg segments from "
			       "dma_map_sg.  Config %d, seg_cnt %d",
			       __FUNCTION__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment
		 * count during probe that limits the number of sg elements
		 * in any single scsi command.  Just run through the seg_cnt
		 * and format the bde's.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, i) {
			physaddr = sg_dma_address(sgel);
			bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
			bpl->tus.f.bdeSize = sg_dma_len(sgel);
			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = 0;
			else
				bpl->tus.f.bdeFlags = BUFF_USE_RCV;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);
			bpl++;
			num_bde++;
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.
	 * Note that the bdeSize is explicitly reinitialized since all iocb
	 * memory resources are used many times for transmit, receive, and
	 * continuation bpl's.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize +=
		(num_bde * sizeof (struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
	return 0;
}

static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
{
	/*
	 * There are only two special cases to consider.  (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg.  There is a third
	 * case, but it does not require resource deallocation.
	 */
	if (psb->seg_cnt > 0)
		scsi_dma_unmap(psb->pCmd);
}

static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_iocbq *rsp_iocb)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t *lp;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;

	/*
	 * If this is a task management command, there is no
	 * scsi packet associated with this lpfc_cmd.  The driver
	 * consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		if (resp_info & RSP_LEN_VALID)
			rsplen = be32_to_cpu(fcprsp->rspRspLen);
		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}
	lp = (uint32_t *)cmnd->sense_buffer;

	if (!scsi_status && (resp_info & RESID_UNDER))
		logit = LOG_FCP;

	lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "0730 FCP command x%x failed: x%x SNS x%x x%x "
			 "Data: x%x x%x x%x x%x x%x\n",
			 cmnd->cmnd[0], scsi_status,
			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
			 be32_to_cpu(fcprsp->rspResId),
			 be32_to_cpu(fcprsp->rspSnsLen),
			 be32_to_cpu(fcprsp->rspRspLen),
			 fcprsp->rspInfo3);

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
		    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
			host_status = DID_ERROR;
			goto out;
		}
	}

	scsi_set_resid(cmnd, 0);
	if (resp_info & RESID_UNDER) {
		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0716 FCP Read Underrun, expected %d, "
				 "residual %d Data: x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 scsi_get_resid(cmnd), fcpi_parm,
				 cmnd->cmnd[0], cmnd->underflow);

		/*
		 * If there is an underrun, check whether the underrun
		 * reported by the storage array matches the underrun
		 * reported by the HBA.  If not, a frame was dropped.
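		 * (scsi_get_resid() holds the target-reported residual;
		 * fcpi_parm holds the HBA-reported one.)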
		 */
		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		    fcpi_parm &&
		    (scsi_get_resid(cmnd) != fcpi_parm)) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_FCP | LOG_FCP_ERROR,
					 "0735 FCP Read Check Error "
					 "and Underrun Data: x%x x%x x%x x%x\n",
					 be32_to_cpu(fcpcmd->fcpDl),
					 scsi_get_resid(cmnd), fcpi_parm,
					 cmnd->cmnd[0]);
			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
			host_status = DID_ERROR;
		}
		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is
		 * at least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
		     < cmnd->underflow)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
					 "0717 FCP command x%x residual "
					 "underrun converted to error "
					 "Data: x%x x%x x%x\n",
					 cmnd->cmnd[0], scsi_bufflen(cmnd),
					 scsi_get_resid(cmnd), cmnd->underflow);
			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0720 FCP command x%x residual overrun error. "
				 "Data: x%x x%x \n", cmnd->cmnd[0],
				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero).  Apply check only to reads.
	 */
	} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
		   (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
				 "0734 FCP Read Check Error Data: "
				 "x%x x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 be32_to_cpu(fcprsp->rspResId),
				 fcpi_parm, cmnd->cmnd[0]);
		host_status = DID_ERROR;
		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
}

static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_vport *vport = pIocbIn->vport;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	int result;
	struct scsi_device *sdev, *tmp_sdev;
	int depth = 0;
	unsigned long flags;

	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;

		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0729 FCP cmd x%x failed <%d/%d> "
				 "status: x%x result: x%x Data: x%x x%x\n",
				 cmd->cmnd[0],
				 cmd->device ? cmd->device->id : 0xffff,
				 cmd->device ? cmd->device->lun : 0xffff,
				 lpfc_cmd->status, lpfc_cmd->result,
				 pIocbOut->iocb.ulpContext,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = ScsiResult(DID_BUS_BUSY, 0);
			break;
		case IOSTAT_LOCAL_REJECT:
			if (lpfc_cmd->result == RJT_UNAVAIL_PERM ||
			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
			    lpfc_cmd->result == RJT_LOGIN_REQUIRED) {
				cmd->result = ScsiResult(DID_REQUEUE, 0);
				break;
			} /* else: fall through */
		default:
			cmd->result = ScsiResult(DID_ERROR, 0);
			break;
		}

		if ((pnode == NULL)
		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
	} else {
		cmd->result = ScsiResult(DID_OK, 0);
	}

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0710 Iodone <%d/%d> cmd %p, error "
				 "x%x SNS x%x x%x Data: x%x x%x\n",
				 cmd->device->id, cmd->device->lun, cmd,
				 cmd->result, *lp, *(lp + 3), cmd->retries,
				 scsi_get_resid(cmd));
	}

	result = cmd->result;
	sdev = cmd->device;
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	cmd->scsi_done(cmd);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		/*
		 * If there is a thread waiting for command completion
		 * wake up the thread.
		 */
		spin_lock_irqsave(sdev->host->host_lock, flags);
		lpfc_cmd->pCmd = NULL;
		if (lpfc_cmd->waitq)
			wake_up(lpfc_cmd->waitq);
		spin_unlock_irqrestore(sdev->host->host_lock, flags);
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return;
	}


	if (!result)
		lpfc_rampup_queue_depth(vport, sdev);

	if (!result && pnode != NULL &&
	    ((jiffies - pnode->last_ramp_up_time) >
		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	    ((jiffies - pnode->last_q_full_time) >
		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	    (vport->cfg_lun_queue_depth > sdev->queue_depth)) {
		shost_for_each_device(tmp_sdev, sdev->host) {
			if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth) {
				if (tmp_sdev->id != sdev->id)
					continue;
				if (tmp_sdev->ordered_tags)
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_ORDERED_TAG,
						tmp_sdev->queue_depth+1);
				else
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_SIMPLE_TAG,
						tmp_sdev->queue_depth+1);

				pnode->last_ramp_up_time = jiffies;
			}
		}
	}

	/*
	 * Check for queue full.  If the lun is reporting queue full, then
	 * back off the lun queue depth to prevent target overloads.
	 */
	if (result == SAM_STAT_TASK_SET_FULL && pnode != NULL) {
		pnode->last_q_full_time = jiffies;

		shost_for_each_device(tmp_sdev, sdev->host) {
			if (tmp_sdev->id != sdev->id)
				continue;
			depth = scsi_track_queue_full(tmp_sdev,
					tmp_sdev->queue_depth - 1);
		}
		/*
		 * The queue depth cannot be lowered any more.
		 * Modify the returned error code to store
		 * the final depth value set by
		 * scsi_track_queue_full.
		 */
		if (depth == -1)
			depth = sdev->host->cmd_per_lun;

		if (depth) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
					 "0711 detected queue full - lun queue "
					 "depth adjusted to %d.\n", depth);
		}
	}

	/*
	 * If there is a thread waiting for command completion
	 * wake up the thread.
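	 * (The abort handler sleeps on lpfc_cmd->waitq until pCmd no longer
	 * points at its command.)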
	 */
	spin_lock_irqsave(sdev->host->host_lock, flags);
	lpfc_cmd->pCmd = NULL;
	if (lpfc_cmd->waitq)
		wake_up(lpfc_cmd->waitq);
	spin_unlock_irqrestore(sdev->host->host_lock, flags);

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}

static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct lpfc_hba *phba = vport->phba;
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;
	char tag[2];

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
			&lpfc_cmd->fcp_cmnd->fcp_lun);

	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);

	if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = ORDERED_Q;
			break;
		default:
			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
			break;
		}
	} else
		fcp_cmnd->fcpCntl1 = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		phba->fc4ControlRequests++;
	}

	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;
	else
		piocbq->iocb.ulpFCP2Rcvy = 0;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1 = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
	piocbq->vport = vport;
}

static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     unsigned int lun,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		return 0;
	}

	piocbq = &(lpfc_cmd->cur_iocbq);
	piocbq->vport = vport;

	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;

	piocb->ulpCommand = CMD_FCP_ICMND64_CR;

	piocb->ulpContext = ndlp->nlp_rpi;
	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		piocb->ulpFCP2Rcvy = 1;
	}
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else {
		piocb->ulpTimeout = lpfc_cmd->timeout;
	}

	return 1;
}

static void
lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
		      struct lpfc_iocbq *cmdiocbq,
		      struct lpfc_iocbq *rspiocbq)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) cmdiocbq->context1;
	if (lpfc_cmd)
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	return;
}

static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
		    unsigned tgt_id, unsigned int lun,
		    struct lpfc_rport_data *rdata)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	int ret;

	if (!rdata->pnode)
		return FAILED;

	lpfc_cmd->rdata = rdata;
	ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
					   FCP_TARGET_RESET);
	if (!ret)
		return FAILED;

	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);

	if (!iocbqrsp)
		return FAILED;

	/* Issue Target Reset to TGT <num> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
			 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
	ret = lpfc_sli_issue_iocb_wait(phba,
				       &phba->sli.ring[phba->sli.fcp_ring],
				       iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (ret != IOCB_SUCCESS) {
		if (ret == IOCB_TIMEDOUT)
			iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	} else {
		ret = SUCCESS;
		lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
		lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	}

	lpfc_sli_release_iocbq(phba, iocbqrsp);
	return ret;
}

const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int len;
	static char lpfcinfobuf[384];

	memset(lpfcinfobuf, 0, 384);
	if (phba && phba->pcidev) {
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			 384-len,
			 " on PCI bus %02x device %02x irq %d",
			 phba->pcidev->bus->number,
			 phba->pcidev->devfn,
			 phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384-len,
				 " port %s",
				 phba->Port);
		}
	}
	return lpfcinfobuf;
}

static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
{
	unsigned long  poll_tmo_expires =
		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

	if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
		mod_timer(&phba->fcp_poll_timer,
			  poll_tmo_expires);
}

void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
	lpfc_poll_rearm_timer(phba);
}

void lpfc_poll_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}
}

static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err;

	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp) {
		cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
		goto out_fail_command;
	}
	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL) {
		lpfc_adjust_queue_depth(phba);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0707 driver's buffer pool is empty, "
				 "IO busied\n");
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
	cmnd->scsi_done = done;

	err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);

	err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err)
		goto out_host_busy_free_buf;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;

 out_host_busy_free_buf:
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_fail_command:
	done(cmnd);
	return 0;
}

static void
lpfc_block_error_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

	spin_lock_irq(shost->host_lock);
	while (rport->port_state == FC_PORTSTATE_BLOCKED) {
		spin_unlock_irq(shost->host_lock);
		msleep(1000);
		spin_lock_irq(shost->host_lock);
	}
	spin_unlock_irq(shost->host_lock);
	return;
}

static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	int ret = SUCCESS;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	lpfc_block_error_handler(cmnd);
	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	BUG_ON(!lpfc_cmd);

	/*
	 * If pCmd field of the corresponding lpfc_scsi_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, but the midlayer did not
	 * see the completion before the eh fired.  Just return
	 * SUCCESS.
	 */
	iocb = &lpfc_cmd->cur_iocbq;
	if (lpfc_cmd->pCmd != cmnd)
		goto out;

	BUG_ON(iocb->context1 != lpfc_cmd);

	abtsiocb = lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		goto out;
	}

	/*
	 * The scsi command cannot be in the txq and it is in flight because
	 * the pCmd is still pointing at the SCSI command we have to abort.
	 * There is no need to search the txcmplq.  Just send an abort to the
	 * FW.
	 */

	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;
	if (lpfc_is_link_up(phba))
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	abtsiocb->vport = vport;
	if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_poll_fcp_ring(phba);

	lpfc_cmd->waitq = &waitq;
	/* Wait for abort to complete */
	wait_event_timeout(waitq,
			   (lpfc_cmd->pCmd != cmnd),
			   (2*vport->cfg_devloss_tmo*HZ));

	spin_lock_irq(shost->host_lock);
	lpfc_cmd->waitq = NULL;
	spin_unlock_irq(shost->host_lock);

	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0748 abort handler timed out waiting "
				 "for abort to complete: ret %#x, ID %d, "
				 "LUN %d, snum %#lx\n",
				 ret, cmnd->device->id, cmnd->device->lun,
				 cmnd->serial_number);
	}

 out:
	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
			 "LUN %d snum %#lx\n", ret, cmnd->device->id,
			 cmnd->device->lun, cmnd->serial_number);
	return ret;
}

static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq, *iocbqrsp;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	uint32_t cmd_result = 0, cmd_status = 0;
	int ret = FAILED;
	int iocb_status = IOCB_SUCCESS;
	int cnt, loopcnt;

	lpfc_block_error_handler(cmnd);
	loopcnt = 0;
	/*
	 * If target is not in a MAPPED state, delay the reset until
	 * target is rediscovered or devloss timeout expires.
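	 * The loop below polls in 500 ms steps and gives up after roughly
	 * cfg_devloss_tmo seconds.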
	 */
	while (1) {
		if (!pnode)
			goto out;

		if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
			schedule_timeout_uninterruptible(msecs_to_jiffies(500));
			loopcnt++;
			rdata = cmnd->device->hostdata;
			if (!rdata ||
			    (loopcnt > ((vport->cfg_devloss_tmo * 2) + 1))) {
				lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
						 "0721 LUN Reset rport "
						 "failure: cnt x%x rdata x%p\n",
						 loopcnt, rdata);
				goto out;
			}
			pnode = rdata->pnode;
			if (!pnode)
				goto out;
		}
		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
			break;
	}

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		goto out;

	lpfc_cmd->timeout = 60;
	lpfc_cmd->rdata = rdata;

	ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, cmnd->device->lun,
					   FCP_TARGET_RESET);
	if (!ret)
		goto out_free_scsi_buf;

	iocbq = &lpfc_cmd->cur_iocbq;

	/* get a buffer for this IOCB command response */
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL)
		goto out_free_scsi_buf;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0703 Issue target reset to TGT %d LUN %d "
			 "rpi x%x nlp_flag x%x\n", cmnd->device->id,
			 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
	iocb_status = lpfc_sli_issue_iocb_wait(phba,
				&phba->sli.ring[phba->sli.fcp_ring],
				iocbq, iocbqrsp, lpfc_cmd->timeout);

	if (iocb_status == IOCB_TIMEDOUT)
		iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;

	if (iocb_status == IOCB_SUCCESS)
		ret = SUCCESS;
	else
		ret = iocb_status;

	cmd_result = iocbqrsp->iocb.un.ulpWord[4];
	cmd_status = iocbqrsp->iocb.ulpStatus;

	lpfc_sli_release_iocbq(phba, iocbqrsp);

	/*
	 * All outstanding txcmplq I/Os should have been aborted by the device.
	 * Unfortunately, some targets do not abide by this, forcing the driver
	 * to double check.
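	 * The flush below re-counts outstanding I/Os every LPFC_RESET_WAIT
	 * seconds for up to twice cfg_devloss_tmo before declaring failure.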
	 */
	cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
				LPFC_CTX_LUN);
	if (cnt)
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    cmnd->device->id, cmnd->device->lun,
				    LPFC_CTX_LUN);
	loopcnt = 0;
	while (cnt) {
		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);

		if (++loopcnt
		    > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
			break;

		cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
					cmnd->device->lun, LPFC_CTX_LUN);
	}

	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0719 device reset I/O flush failure: "
				 "cnt x%x\n", cnt);
		ret = FAILED;
	}

 out_free_scsi_buf:
	if (iocb_status != IOCB_TIMEDOUT) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	}
	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0713 SCSI layer issued device reset (%d, %d) "
			 "return x%x status x%x result x%x\n",
			 cmnd->device->id, cmnd->device->lun, ret,
			 cmd_status, cmd_result);
 out:
	return ret;
}

static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp = NULL;
	int match;
	int ret = FAILED, i, err_count = 0;
	int cnt, loopcnt;
	struct lpfc_scsi_buf *lpfc_cmd;

	lpfc_block_error_handler(cmnd);

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		goto out;

	/* The lpfc_cmd storage is reused.  Set all loop invariants. */
	lpfc_cmd->timeout = 60;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for mapped node by target ID */
		match = 0;
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    i == ndlp->nlp_sid &&
			    ndlp->rport) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;

		ret = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
					  cmnd->device->lun,
					  ndlp->rport->dd_data);
		if (ret != SUCCESS) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0700 Bus Reset on target %d failed\n",
					 i);
			err_count++;
			break;
		}
	}

	if (ret != IOCB_TIMEDOUT)
		lpfc_release_scsi_buf(phba, lpfc_cmd);

	if (err_count == 0)
		ret = SUCCESS;
	else
		ret = FAILED;

	/*
	 * All outstanding txcmplq I/Os should have been aborted by
	 * the targets.  Unfortunately, some targets do not abide by
	 * this, forcing the driver to double check.
	 */
	cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
	if (cnt)
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    0, 0, LPFC_CTX_HOST);
	loopcnt = 0;
	while (cnt) {
		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);

		if (++loopcnt
		    > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
			break;

		cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
	}

	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0715 Bus Reset I/O flush failure: "
				 "cnt x%x left x%x\n", cnt, i);
		ret = FAILED;
	}

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
 out:
	return ret;
}

static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *scsi_buf = NULL;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0, i;
	uint32_t num_to_alloc = 0;
	unsigned long flags;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = rport->dd_data;

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers.  Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure.  The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra.  This list of scsi bufs exists for the lifetime of the
	 * driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = vport->cfg_lun_queue_depth + 2;

	/* Allow some exchanges to be available always to complete discovery */
	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0704 At limitation of %d preallocated "
				 "command buffers\n", total);
		return 0;
	/* Allow some exchanges to be available always to complete discovery */
	} else if (total + num_to_alloc >
		   phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0705 Allocation request of %d "
				 "command buffers will exceed max of %d.  "
				 "Reducing allocation request to %d.\n",
				 num_to_alloc, phba->cfg_hba_queue_depth,
				 (phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}

	for (i = 0; i < num_to_alloc; i++) {
		scsi_buf = lpfc_new_scsi_buf(vport);
		if (!scsi_buf) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0706 Failed to allocate "
					 "command buffer\n");
			break;
		}

		spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
		phba->total_scsi_bufs++;
		list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
	}
	return 0;
}

static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct fc_rport *rport = starget_to_rport(sdev->sdev_target);

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
	else
		scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);

	/*
	 * Initialize the fc transport attributes for the target
	 * containing this scsi device.  Also note that the driver's
	 * target pointer is stored in the starget_data for the
	 * driver's sysfs entry point functions.
	 */
	rport->dev_loss_tmo = vport->cfg_devloss_tmo;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}

static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
	return;
}


struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
};

struct scsi_host_template lpfc_vport_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_vport_attrs,
	.max_sectors		= 0xFFFF,
};