/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

/*
 * This function is called with no lock held when there is a resource
 * error in driver or in firmware.
 */
void
lpfc_adjust_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	if ((phba->pport->work_port_events &
		WORKER_RAMP_DOWN_QUEUE) == 0) {
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	}
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	spin_lock_irqsave(&phba->hbalock, flags);
	if (phba->work_wait)
		wake_up(phba->work_wait);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	return;
}

/*
 * This function is called with no lock held when there is a successful
 * SCSI command completion.
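 * It counts the completion and, when the LUN queue depth can still grow
 * and both the ramp-up and resource-error hold-off intervals have expired,
 * flags the worker thread to ramp the queue depth back up.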
 */
static inline void
lpfc_rampup_queue_depth(struct lpfc_vport *vport,
			struct scsi_device *sdev)
{
	unsigned long flags;
	struct lpfc_hba *phba = vport->phba;
	atomic_inc(&phba->num_cmd_success);

	if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
		return;
	spin_lock_irqsave(&phba->hbalock, flags);
	if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
	    ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL) > jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}
	phba->last_ramp_up_time = jiffies;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	if ((phba->pport->work_port_events &
		WORKER_RAMP_UP_QUEUE) == 0) {
		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
	}
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	spin_lock_irqsave(&phba->hbalock, flags);
	if (phba->work_wait)
		wake_up(phba->work_wait);
	spin_unlock_irqrestore(&phba->hbalock, flags);
}

void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
				if (sdev->ordered_tags)
					scsi_adjust_queue_depth(sdev,
							MSG_ORDERED_TAG,
							new_queue_depth);
				else
					scsi_adjust_queue_depth(sdev,
							MSG_SIMPLE_TAG,
							new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

void
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				if (sdev->ordered_tags)
					scsi_adjust_queue_depth(sdev,
							MSG_ORDERED_TAG,
							sdev->queue_depth+1);
				else
					scsi_adjust_queue_depth(sdev,
							MSG_SIMPLE_TAG,
							sdev->queue_depth+1);
			}
		}
	lpfc_destroy_vport_work_array(vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/*
 * This routine allocates a scsi buffer, which contains all the necessary
 * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
 * contains information to build the IOCB.  The DMAable region contains
 * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
 * and the BPL BDE is setup in the IOCB.
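 *
 * The routine returns NULL if the lpfc_scsi_buf, its DMA buffer, or an
 * iotag cannot be allocated; otherwise the fully initialized buffer is
 * returned to the caller.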
 */
static struct lpfc_scsi_buf *
lpfc_new_scsi_buf(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys;
	uint16_t iotag;

	psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
	if (!psb)
		return NULL;

	/*
	 * Get memory from the pci pool to map the virt space to pci bus space
	 * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
	 * struct fcp_rsp and the number of bde's necessary to support the
	 * sg_tablesize.
	 */
	psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
				   &psb->dma_handle);
	if (!psb->data) {
		kfree(psb);
		return NULL;
	}

	/* Initialize virtual ptrs to dma_buf region. */
	memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

	/* Allocate iotag for psb->cur_iocbq. */
	iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
	if (iotag == 0) {
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
			      psb->data, psb->dma_handle);
		kfree(psb);
		return NULL;
	}
	psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

	psb->fcp_cmnd = psb->data;
	psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
	psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
						sizeof(struct fcp_rsp);

	/* Initialize local short-hand pointers. */
	bpl = psb->fcp_bpl;
	pdma_phys = psb->dma_handle;

	/*
	 * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
	 * list bdes.  Initialize the first two and leave the rest for
	 * queuecommand.
	 */
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
	bpl->tus.f.bdeFlags = BUFF_USE_CMND;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	/* Setup the physical region for the FCP RSP */
	pdma_phys += sizeof (struct fcp_cmnd);
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
	bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	/*
	 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
	 * initialize it with all known data now.
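	 * The BDL in the IOCB points at the BPL set up above (the FCP CMND
	 * and FCP RSP BDEs); the data BDEs are added later by
	 * lpfc_scsi_prep_dma_buf when the command is queued.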
	 */
	pdma_phys += (sizeof (struct fcp_rsp));
	iocb = &psb->cur_iocbq.iocb;
	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
	iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
	iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
	iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
	iocb->ulpBdeCount = 1;
	iocb->ulpClass = CLASS3;

	return psb;
}

static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba * phba)
{
	struct lpfc_scsi_buf * lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd) {
		lpfc_cmd->seg_cnt = 0;
		lpfc_cmd->nonsg_phys = 0;
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return lpfc_cmd;
}

static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	psb->pCmd = NULL;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}

static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t i, num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			printk(KERN_ERR "%s: Too many sg segments from "
			       "dma_map_sg.  Config %d, seg_cnt %d",
			       __FUNCTION__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment
		 * count during probe that limits the number of sg elements
		 * in any single scsi command.  Just run through the seg_cnt
		 * and format the bde's.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, i) {
			physaddr = sg_dma_address(sgel);
			bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
			bpl->tus.f.bdeSize = sg_dma_len(sgel);
			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = 0;
			else
				bpl->tus.f.bdeFlags = BUFF_USE_RCV;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);
			bpl++;
			num_bde++;
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.
	 * Note that the bdeSize is explicitly reinitialized since all iocb
	 * memory resources are used many times for transmit, receive, and
	 * continuation bpl's.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize +=
		(num_bde * sizeof (struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;
	fcp_cmnd->fcpDl = be32_to_cpu(scsi_bufflen(scsi_cmnd));
	return 0;
}

static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
{
	/*
	 * There are only two special cases to consider.  (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg.  There is a third
	 * case, but it does not require resource deallocation.
	 */
	if (psb->seg_cnt > 0)
		scsi_dma_unmap(psb->pCmd);
}

static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_iocbq *rsp_iocb)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t *lp;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;

	/*
	 * If this is a task management command, there is no
	 * scsi packet associated with this lpfc_cmd.  The driver
	 * consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		if (resp_info & RSP_LEN_VALID)
			rsplen = be32_to_cpu(fcprsp->rspRspLen);
		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}
	lp = (uint32_t *)cmnd->sense_buffer;

	if (!scsi_status && (resp_info & RESID_UNDER))
		logit = LOG_FCP;

	lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "0730 FCP command x%x failed: x%x SNS x%x x%x "
			 "Data: x%x x%x x%x x%x x%x\n",
			 cmnd->cmnd[0], scsi_status,
			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
			 be32_to_cpu(fcprsp->rspResId),
			 be32_to_cpu(fcprsp->rspSnsLen),
			 be32_to_cpu(fcprsp->rspRspLen),
			 fcprsp->rspInfo3);

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
		    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
			host_status = DID_ERROR;
			goto out;
		}
	}

	scsi_set_resid(cmnd, 0);
	if (resp_info & RESID_UNDER) {
		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0716 FCP Read Underrun, expected %d, "
				 "residual %d Data: x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
				 cmnd->underflow);

		/*
		 * If there is an under run check if under run reported by
		 * storage array is same as the under run reported by HBA.
		 * If this is not the same, there is a dropped frame.
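		 * In that case the whole transfer is treated as lost: the
		 * residual is forced to the full buffer length and the
		 * command is completed with DID_ERROR.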
		 */
		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		    fcpi_parm &&
		    (scsi_get_resid(cmnd) != fcpi_parm)) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_FCP | LOG_FCP_ERROR,
					 "0735 FCP Read Check Error "
					 "and Underrun Data: x%x x%x x%x x%x\n",
					 be32_to_cpu(fcpcmd->fcpDl),
					 scsi_get_resid(cmnd), fcpi_parm,
					 cmnd->cmnd[0]);
			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
			host_status = DID_ERROR;
		}
		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is
		 * at least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
		     < cmnd->underflow)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
					 "0717 FCP command x%x residual "
					 "underrun converted to error "
					 "Data: x%x x%x x%x\n",
					 cmnd->cmnd[0], scsi_bufflen(cmnd),
					 scsi_get_resid(cmnd), cmnd->underflow);
			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0720 FCP command x%x residual overrun error. "
				 "Data: x%x x%x \n", cmnd->cmnd[0],
				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero).  Apply check only to reads.
	 */
	} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
		   (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
				 "0734 FCP Read Check Error Data: "
				 "x%x x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 be32_to_cpu(fcprsp->rspResId),
				 fcpi_parm, cmnd->cmnd[0]);
		host_status = DID_ERROR;
		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
}

static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_vport *vport = pIocbIn->vport;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	int result;
	struct scsi_device *sdev, *tmp_sdev;
	int depth = 0;

	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;

		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0729 FCP cmd x%x failed <%d/%d> "
				 "status: x%x result: x%x Data: x%x x%x\n",
				 cmd->cmnd[0],
				 cmd->device ? cmd->device->id : 0xffff,
				 cmd->device ?
					cmd->device->lun : 0xffff,
				 lpfc_cmd->status, lpfc_cmd->result,
				 pIocbOut->iocb.ulpContext,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = ScsiResult(DID_BUS_BUSY, 0);
			break;
		case IOSTAT_LOCAL_REJECT:
			if (lpfc_cmd->result == RJT_UNAVAIL_PERM ||
			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
			    lpfc_cmd->result == RJT_LOGIN_REQUIRED) {
				cmd->result = ScsiResult(DID_REQUEUE, 0);
				break;
			} /* else: fall through */
		default:
			cmd->result = ScsiResult(DID_ERROR, 0);
			break;
		}

		if ((pnode == NULL)
		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
	} else {
		cmd->result = ScsiResult(DID_OK, 0);
	}

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0710 Iodone <%d/%d> cmd %p, error "
				 "x%x SNS x%x x%x Data: x%x x%x\n",
				 cmd->device->id, cmd->device->lun, cmd,
				 cmd->result, *lp, *(lp + 3), cmd->retries,
				 scsi_get_resid(cmd));
	}

	result = cmd->result;
	sdev = cmd->device;
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	cmd->scsi_done(cmd);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return;
	}

	if (!result)
		lpfc_rampup_queue_depth(vport, sdev);

	if (!result && pnode != NULL &&
	   ((jiffies - pnode->last_ramp_up_time) >
		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	   ((jiffies - pnode->last_q_full_time) >
		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	   (vport->cfg_lun_queue_depth > sdev->queue_depth)) {
		shost_for_each_device(tmp_sdev, sdev->host) {
			if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth) {
				if (tmp_sdev->id != sdev->id)
					continue;
				if (tmp_sdev->ordered_tags)
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_ORDERED_TAG,
						tmp_sdev->queue_depth+1);
				else
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_SIMPLE_TAG,
						tmp_sdev->queue_depth+1);

				pnode->last_ramp_up_time = jiffies;
			}
		}
	}

	/*
	 * Check for queue full.  If the lun is reporting queue full, then
	 * back off the lun queue depth to prevent target overloads.
	 */
	if (result == SAM_STAT_TASK_SET_FULL && pnode != NULL) {
		pnode->last_q_full_time = jiffies;

		shost_for_each_device(tmp_sdev, sdev->host) {
			if (tmp_sdev->id != sdev->id)
				continue;
			depth = scsi_track_queue_full(tmp_sdev,
					tmp_sdev->queue_depth - 1);
		}
		/*
		 * The queue depth cannot be lowered any more.
		 * Modify the returned error code to store
		 * the final depth value set by
		 * scsi_track_queue_full.
		 */
		if (depth == -1)
			depth = sdev->host->cmd_per_lun;

		if (depth) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
					 "0711 detected queue full - lun queue "
					 "depth adjusted to %d.\n", depth);
		}
	}

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}

static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct lpfc_hba *phba = vport->phba;
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
		       &lpfc_cmd->fcp_cmnd->fcp_lun);

	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);

	if (scsi_cmnd->device->tagged_supported) {
		switch (scsi_cmnd->tag) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = ORDERED_Q;
			break;
		default:
			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
			break;
		}
	} else
		fcp_cmnd->fcpCntl1 = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		phba->fc4ControlRequests++;
	}

	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1 = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
	piocbq->vport = vport;
}

static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     unsigned int lun,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		return 0;
	}

	piocbq = &(lpfc_cmd->cur_iocbq);
	piocbq->vport = vport;

	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;

	piocb->ulpCommand = CMD_FCP_ICMND64_CR;

	piocb->ulpContext = ndlp->nlp_rpi;
	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
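		/* The remote node reports FCP-2 support; enable FCP-2
		 * error recovery on this exchange.
		 */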
		piocb->ulpFCP2Rcvy = 1;
	}
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else {
		piocb->ulpTimeout = lpfc_cmd->timeout;
	}

	return 1;
}

static void
lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
		      struct lpfc_iocbq *cmdiocbq,
		      struct lpfc_iocbq *rspiocbq)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) cmdiocbq->context1;
	if (lpfc_cmd)
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	return;
}

static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
		    unsigned tgt_id, unsigned int lun,
		    struct lpfc_rport_data *rdata)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	int ret;

	if (!rdata->pnode)
		return FAILED;

	lpfc_cmd->rdata = rdata;
	ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
					   FCP_TARGET_RESET);
	if (!ret)
		return FAILED;

	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);

	if (!iocbqrsp)
		return FAILED;

	/* Issue Target Reset to TGT <num> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
			 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
	ret = lpfc_sli_issue_iocb_wait(phba,
				       &phba->sli.ring[phba->sli.fcp_ring],
				       iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (ret != IOCB_SUCCESS) {
		if (ret == IOCB_TIMEDOUT)
			iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	} else {
		ret = SUCCESS;
		lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
		lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	}

	lpfc_sli_release_iocbq(phba, iocbqrsp);
	return ret;
}

const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int len;
	static char lpfcinfobuf[384];

	memset(lpfcinfobuf, 0, 384);
	if (phba && phba->pcidev) {
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			 384-len,
			 " on PCI bus %02x device %02x irq %d",
			 phba->pcidev->bus->number,
			 phba->pcidev->devfn,
			 phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384-len,
				 " port %s",
				 phba->Port);
		}
	}
	return lpfcinfobuf;
}

static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
{
	unsigned long poll_tmo_expires =
		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

	if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
		mod_timer(&phba->fcp_poll_timer,
			  poll_tmo_expires);
}

void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
	lpfc_poll_rearm_timer(phba);
}

void lpfc_poll_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}
}
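
/*
 * lpfc_queuecommand: the queuecommand entry point called by the SCSI
 * midlayer.  Verifies the remote port and node state, takes an
 * lpfc_scsi_buf from the free list, maps the data buffer, builds the
 * FCP command IOCB and issues it to the FCP ring.  Returns 0 on success
 * or SCSI_MLQUEUE_HOST_BUSY when driver resources are exhausted.
 */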
static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err;

	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp) {
		cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
		goto out_fail_command;
	}
	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL) {
		lpfc_adjust_queue_depth(phba);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0707 driver's buffer pool is empty, "
				 "IO busied\n");
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
	cmnd->scsi_done = done;

	err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);

	err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err)
		goto out_host_busy_free_buf;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;

 out_host_busy_free_buf:
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_fail_command:
	done(cmnd);
	return 0;
}

static void
lpfc_block_error_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

	spin_lock_irq(shost->host_lock);
	while (rport->port_state == FC_PORTSTATE_BLOCKED) {
		spin_unlock_irq(shost->host_lock);
		msleep(1000);
		spin_lock_irq(shost->host_lock);
	}
	spin_unlock_irq(shost->host_lock);
	return;
}

static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	unsigned int loop_count = 0;
	int ret = SUCCESS;

	lpfc_block_error_handler(cmnd);
	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	BUG_ON(!lpfc_cmd);

	/*
	 * If pCmd field of the corresponding lpfc_scsi_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, but the midlayer did not
	 * see the completion before the eh fired.  Just return
	 * SUCCESS.
	 */
	iocb = &lpfc_cmd->cur_iocbq;
	if (lpfc_cmd->pCmd != cmnd)
		goto out;

	BUG_ON(iocb->context1 != lpfc_cmd);

	abtsiocb = lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		goto out;
	}

	/*
	 * The scsi command cannot be in txq and it is in flight because the
	 * pCmd is still pointing at the SCSI command we have to abort.  There
	 * is no need to search the txcmplq.  Just send an abort to the FW.
	 */

	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;
	if (lpfc_is_link_up(phba))
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	abtsiocb->vport = vport;
	if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_poll_fcp_ring(phba);

	/* Wait for abort to complete */
	while (lpfc_cmd->pCmd == cmnd) {
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_sli_poll_fcp_ring(phba);

		schedule_timeout_uninterruptible(LPFC_ABORT_WAIT * HZ);
		if (++loop_count
		    > (2 * vport->cfg_devloss_tmo)/LPFC_ABORT_WAIT)
			break;
	}

	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0748 abort handler timed out waiting "
				 "for abort to complete: ret %#x, ID %d, "
				 "LUN %d, snum %#lx\n",
				 ret, cmnd->device->id, cmnd->device->lun,
				 cmnd->serial_number);
	}

 out:
	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
			 "LUN %d snum %#lx\n", ret, cmnd->device->id,
			 cmnd->device->lun, cmnd->serial_number);
	return ret;
}

static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq, *iocbqrsp;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	uint32_t cmd_result = 0, cmd_status = 0;
	int ret = FAILED;
	int iocb_status = IOCB_SUCCESS;
	int cnt, loopcnt;

	lpfc_block_error_handler(cmnd);
	loopcnt = 0;
	/*
	 * If target is not in a MAPPED state, delay the reset until
	 * target is rediscovered or devloss timeout expires.
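	 * The loop below polls the node state every 500 milliseconds and
	 * gives up after roughly vport->cfg_devloss_tmo seconds.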
	 */
	while (1) {
		if (!pnode)
			goto out;

		if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
			schedule_timeout_uninterruptible(msecs_to_jiffies(500));
			loopcnt++;
			rdata = cmnd->device->hostdata;
			if (!rdata ||
			    (loopcnt > ((vport->cfg_devloss_tmo * 2) + 1))) {
				lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
						 "0721 LUN Reset rport "
						 "failure: cnt x%x rdata x%p\n",
						 loopcnt, rdata);
				goto out;
			}
			pnode = rdata->pnode;
			if (!pnode)
				goto out;
		}
		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
			break;
	}

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		goto out;

	lpfc_cmd->timeout = 60;
	lpfc_cmd->rdata = rdata;

	ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, cmnd->device->lun,
					   FCP_TARGET_RESET);
	if (!ret)
		goto out_free_scsi_buf;

	iocbq = &lpfc_cmd->cur_iocbq;

	/* get a buffer for this IOCB command response */
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL)
		goto out_free_scsi_buf;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0703 Issue target reset to TGT %d LUN %d "
			 "rpi x%x nlp_flag x%x\n", cmnd->device->id,
			 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
	iocb_status = lpfc_sli_issue_iocb_wait(phba,
				       &phba->sli.ring[phba->sli.fcp_ring],
				       iocbq, iocbqrsp, lpfc_cmd->timeout);

	if (iocb_status == IOCB_TIMEDOUT)
		iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;

	if (iocb_status == IOCB_SUCCESS)
		ret = SUCCESS;
	else
		ret = iocb_status;

	cmd_result = iocbqrsp->iocb.un.ulpWord[4];
	cmd_status = iocbqrsp->iocb.ulpStatus;

	lpfc_sli_release_iocbq(phba, iocbqrsp);

	/*
	 * All outstanding txcmplq I/Os should have been aborted by the device.
	 * Unfortunately, some targets do not abide by this forcing the driver
	 * to double check.
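	 * Any I/O still pending on this LUN is aborted explicitly and the
	 * outstanding count is polled until it drains or the wait limit
	 * is reached.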
	 */
	cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
				LPFC_CTX_LUN);
	if (cnt)
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    cmnd->device->id, cmnd->device->lun,
				    LPFC_CTX_LUN);
	loopcnt = 0;
	while (cnt) {
		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);

		if (++loopcnt
		    > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
			break;

		cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
					cmnd->device->lun, LPFC_CTX_LUN);
	}

	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0719 device reset I/O flush failure: "
				 "cnt x%x\n", cnt);
		ret = FAILED;
	}

 out_free_scsi_buf:
	if (iocb_status != IOCB_TIMEDOUT) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	}
	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0713 SCSI layer issued device reset (%d, %d) "
			 "return x%x status x%x result x%x\n",
			 cmnd->device->id, cmnd->device->lun, ret,
			 cmd_status, cmd_result);
 out:
	return ret;
}

static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp = NULL;
	int match;
	int ret = FAILED, i, err_count = 0;
	int cnt, loopcnt;
	struct lpfc_scsi_buf *lpfc_cmd;

	lpfc_block_error_handler(cmnd);

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		goto out;

	/* The lpfc_cmd storage is reused.  Set all loop invariants. */
	lpfc_cmd->timeout = 60;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for mapped node by target ID */
		match = 0;
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    i == ndlp->nlp_sid &&
			    ndlp->rport) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;

		ret = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
					  cmnd->device->lun,
					  ndlp->rport->dd_data);
		if (ret != SUCCESS) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0700 Bus Reset on target %d failed\n",
					 i);
			err_count++;
			break;
		}
	}

	if (ret != IOCB_TIMEDOUT)
		lpfc_release_scsi_buf(phba, lpfc_cmd);

	if (err_count == 0)
		ret = SUCCESS;
	else
		ret = FAILED;

	/*
	 * All outstanding txcmplq I/Os should have been aborted by
	 * the targets.  Unfortunately, some targets do not abide by
	 * this forcing the driver to double check.
	 */
	cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
	if (cnt)
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    0, 0, LPFC_CTX_HOST);
	loopcnt = 0;
	while (cnt) {
		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);

		if (++loopcnt
		    > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
			break;

		cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
	}

	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0715 Bus Reset I/O flush failure: "
				 "cnt x%x left x%x\n", cnt, i);
		ret = FAILED;
	}

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
 out:
	return ret;
}

static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *scsi_buf = NULL;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0, i;
	uint32_t num_to_alloc = 0;
	unsigned long flags;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = rport->dd_data;

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers.  Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure.  The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra.  This list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = vport->cfg_lun_queue_depth + 2;

	/* Allow some exchanges to be available always to complete discovery */
	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0704 At limitation of %d preallocated "
				 "command buffers\n", total);
		return 0;
	/* Allow some exchanges to be available always to complete discovery */
	} else if (total + num_to_alloc >
		   phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0705 Allocation request of %d "
				 "command buffers will exceed max of %d.  "
				 "Reducing allocation request to %d.\n",
				 num_to_alloc, phba->cfg_hba_queue_depth,
				 (phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}

	for (i = 0; i < num_to_alloc; i++) {
		scsi_buf = lpfc_new_scsi_buf(vport);
		if (!scsi_buf) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0706 Failed to allocate "
					 "command buffer\n");
			break;
		}

		spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
		phba->total_scsi_bufs++;
		list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
	}
	return 0;
}

static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct fc_rport *rport = starget_to_rport(sdev->sdev_target);

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
	else
		scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);

	/*
	 * Initialize the fc transport attributes for the target
	 * containing this scsi device.  Also note that the driver's
	 * target pointer is stored in the starget_data for the
	 * driver's sysfs entry point functions.
	 */
	rport->dev_loss_tmo = vport->cfg_devloss_tmo;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}

static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
	return;
}


struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_SG_SEG_CNT,
	.use_sg_chaining	= ENABLE_SG_CHAINING,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
};

struct scsi_host_template lpfc_vport_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.use_sg_chaining	= ENABLE_SG_CHAINING,
	.shost_attrs		= lpfc_vport_attrs,
	.max_sectors		= 0xFFFF,
};