/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2007 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT 2
#define LPFC_ABORT_WAIT 2

/*
 * This function is called with no lock held when there is a resource
 * error in driver or in firmware.
 */
void
lpfc_adjust_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	if ((phba->pport->work_port_events &
	     WORKER_RAMP_DOWN_QUEUE) == 0) {
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	}
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	spin_lock_irqsave(&phba->hbalock, flags);
	if (phba->work_wait)
		wake_up(phba->work_wait);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	return;
}

/*
 * This function is called with no lock held when there is a successful
 * SCSI command completion.
 */
static inline void
lpfc_rampup_queue_depth(struct lpfc_hba *phba,
			struct scsi_device *sdev)
{
	unsigned long flags;
	atomic_inc(&phba->num_cmd_success);

	if (phba->cfg_lun_queue_depth <= sdev->queue_depth)
		return;

	spin_lock_irqsave(&phba->hbalock, flags);
	if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
	    ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL) > jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_up_time = jiffies;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	if ((phba->pport->work_port_events &
	     WORKER_RAMP_UP_QUEUE) == 0) {
		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
	}
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	spin_lock_irqsave(&phba->hbalock, flags);
	if (phba->work_wait)
		wake_up(phba->work_wait);
	spin_unlock_irqrestore(&phba->hbalock, flags);
}

void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct Scsi_Host *host;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(vport, &phba->port_list, listentry) {
		host = lpfc_shost_from_vport(vport);
		if (!scsi_host_get(host))
			continue;

		spin_unlock_irq(&phba->hbalock);

		shost_for_each_device(sdev, host) {
			new_queue_depth = sdev->queue_depth * num_rsrc_err /
					  (num_rsrc_err + num_cmd_success);
			if (!new_queue_depth)
				new_queue_depth = sdev->queue_depth - 1;
			else
				new_queue_depth =
					sdev->queue_depth - new_queue_depth;

			if (sdev->ordered_tags)
				scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
							new_queue_depth);
			else
				scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
							new_queue_depth);
		}
		spin_lock_irq(&phba->hbalock);
		scsi_host_put(host);
	}
	spin_unlock_irq(&phba->hbalock);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

void
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct Scsi_Host *host;
	struct scsi_device *sdev;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(vport, &phba->port_list, listentry) {
		host = lpfc_shost_from_vport(vport);
		if (!scsi_host_get(host))
			continue;

		spin_unlock_irq(&phba->hbalock);
		shost_for_each_device(sdev, host) {
			if (sdev->ordered_tags)
				scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
							sdev->queue_depth+1);
			else
				scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
							sdev->queue_depth+1);
		}
		spin_lock_irq(&phba->hbalock);
		scsi_host_put(host);
	}
	spin_unlock_irq(&phba->hbalock);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

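/*
 * Worked example of the ramp-down arithmetic in
 * lpfc_ramp_down_queue_handler() above (illustrative values only, not
 * taken from a real trace): with sdev->queue_depth = 30, num_rsrc_err = 5
 * and num_cmd_success = 45, the reduction is 30 * 5 / (5 + 45) = 3, so the
 * LUN queue depth drops from 30 to 27. If the integer division rounds down
 * to zero, the depth is simply decremented by one instead.
 */
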
/*
 * This routine allocates a scsi buffer, which contains all the necessary
 * information needed to initiate a SCSI I/O. The non-DMAable buffer region
 * contains information to build the IOCB. The DMAable region contains
 * memory for the FCP CMND, FCP RSP, and the initial BPL. In addition to
 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
 * and the BPL BDE is setup in the IOCB.
 */
static struct lpfc_scsi_buf *
lpfc_new_scsi_buf(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys;
	uint16_t iotag;

	psb = kmalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
	if (!psb)
		return NULL;
	memset(psb, 0, sizeof (struct lpfc_scsi_buf));

	/*
	 * Get memory from the pci pool to map the virt space to pci bus space
	 * for an I/O. The DMA buffer includes space for the struct fcp_cmnd,
	 * struct fcp_rsp and the number of bde's necessary to support the
	 * sg_tablesize.
	 */
	psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
				   &psb->dma_handle);
	if (!psb->data) {
		kfree(psb);
		return NULL;
	}

	/* Initialize virtual ptrs to dma_buf region. */
	memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

	/* Allocate iotag for psb->cur_iocbq. */
	iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
	if (iotag == 0) {
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
			      psb->data, psb->dma_handle);
		kfree(psb);
		return NULL;
	}
	psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

	psb->fcp_cmnd = psb->data;
	psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
	psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
		       sizeof(struct fcp_rsp);

	/* Initialize local short-hand pointers. */
	bpl = psb->fcp_bpl;
	pdma_phys = psb->dma_handle;

	/*
	 * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg
	 * list bdes. Initialize the first two and leave the rest for
	 * queuecommand.
	 */
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
	bpl->tus.f.bdeFlags = BUFF_USE_CMND;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	/* Setup the physical region for the FCP RSP */
	pdma_phys += sizeof (struct fcp_cmnd);
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
	bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

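	/*
	 * At this point the single DMA allocation is laid out as:
	 *
	 *   psb->data:  [ struct fcp_cmnd ][ struct fcp_rsp ][ BPL entries ]
	 *
	 * bpl[0] and bpl[1] (set up above) describe the FCP CMND and FCP RSP
	 * regions; the remaining entries cover up to cfg_sg_seg_cnt
	 * scatter/gather segments and are filled in per command by
	 * lpfc_scsi_prep_dma_buf().
	 */
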
	/*
	 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
	 * initialize it with all known data now.
	 */
	pdma_phys += (sizeof (struct fcp_rsp));
	iocb = &psb->cur_iocbq.iocb;
	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
	iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
	iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
	iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
	iocb->ulpBdeCount = 1;
	iocb->ulpClass = CLASS3;

	return psb;
}

static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba * phba)
{
	struct lpfc_scsi_buf * lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd) {
		lpfc_cmd->seg_cnt = 0;
		lpfc_cmd->nonsg_phys = 0;
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return lpfc_cmd;
}

static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	psb->pCmd = NULL;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}

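/*
 * Map the data buffer attached to the SCSI command into BPL entries.
 * Returns 0 on success and 1 if the scatter/gather list cannot be mapped
 * or exceeds the per-command segment limit established at probe time.
 */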
static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t i, num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither. Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages. They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			printk(KERN_ERR "%s: Too many sg segments from "
			       "dma_map_sg. Config %d, seg_cnt %d",
			       __FUNCTION__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment
		 * count during probe that limits the number of sg elements
		 * in any single scsi command. Just run through the seg_cnt
		 * and format the bde's.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, i) {
			physaddr = sg_dma_address(sgel);
			bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
			bpl->tus.f.bdeSize = sg_dma_len(sgel);
			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = 0;
			else
				bpl->tus.f.bdeFlags = BUFF_USE_RCV;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);
			bpl++;
			num_bde++;
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
	 * reinitialized since all iocb memory resources are used many times
	 * for transmit, receive, and continuation bpl's.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize +=
		(num_bde * sizeof (struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;
	fcp_cmnd->fcpDl = be32_to_cpu(scsi_bufflen(scsi_cmnd));
	return 0;
}

static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
{
	/*
	 * There are only two special cases to consider. (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg. There is a third
	 * case, but it does not require resource deallocation.
	 */
	if (psb->seg_cnt > 0)
		scsi_dma_unmap(psb->pCmd);
}

static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_iocbq *rsp_iocb)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	struct lpfc_hba *phba = vport->phba;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	uint32_t vpi = vport->vpi;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t *lp;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;

	/*
	 * If this is a task management command, there is no
	 * scsi packet associated with this lpfc_cmd. The driver
	 * consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		if (resp_info & RSP_LEN_VALID)
			rsplen = be32_to_cpu(fcprsp->rspRspLen);
		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}
	lp = (uint32_t *)cmnd->sense_buffer;

	if (!scsi_status && (resp_info & RESID_UNDER))
		logit = LOG_FCP;

	lpfc_printf_log(phba, KERN_WARNING, logit,
			"%d (%d):0730 FCP command x%x failed: x%x SNS x%x x%x "
			"Data: x%x x%x x%x x%x x%x\n",
			phba->brd_no, vpi, cmnd->cmnd[0], scsi_status,
			be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
			be32_to_cpu(fcprsp->rspResId),
			be32_to_cpu(fcprsp->rspSnsLen),
			be32_to_cpu(fcprsp->rspRspLen),
			fcprsp->rspInfo3);

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
		    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
			host_status = DID_ERROR;
			goto out;
		}
	}

	scsi_set_resid(cmnd, 0);
	if (resp_info & RESID_UNDER) {
		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d (%d):0716 FCP Read Underrun, expected %d, "
				"residual %d Data: x%x x%x x%x\n",
				phba->brd_no, vpi, be32_to_cpu(fcpcmd->fcpDl),
				scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
				cmnd->underflow);

		/*
		 * If there is an underrun, check whether the underrun reported
		 * by the storage array matches the underrun reported by the
		 * HBA. If they do not match, a frame was dropped.
		 */
		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		    fcpi_parm &&
		    (scsi_get_resid(cmnd) != fcpi_parm)) {
			lpfc_printf_log(phba, KERN_WARNING,
					LOG_FCP | LOG_FCP_ERROR,
					"%d (%d):0735 FCP Read Check Error "
					"and Underrun Data: x%x x%x x%x x%x\n",
					phba->brd_no, vpi,
					be32_to_cpu(fcpcmd->fcpDl),
					scsi_get_resid(cmnd), fcpi_parm,
					cmnd->cmnd[0]);
			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
			host_status = DID_ERROR;
		}
		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command. Provided a sense condition
		 * is not present, make sure the actual amount transferred is
		 * at least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
		     < cmnd->underflow)) {
			lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
					"%d (%d):0717 FCP command x%x residual "
					"underrun converted to error "
					"Data: x%x x%x x%x\n",
					phba->brd_no, vpi, cmnd->cmnd[0],
					scsi_bufflen(cmnd),
					scsi_get_resid(cmnd), cmnd->underflow);
			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d (%d):0720 FCP command x%x residual "
				"overrun error. Data: x%x x%x \n",
				phba->brd_no, vpi, cmnd->cmnd[0],
				scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero). Apply check only to reads.
	 */
	} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
		   (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
				"%d (%d):0734 FCP Read Check Error Data: "
				"x%x x%x x%x x%x\n",
				phba->brd_no, vpi,
				be32_to_cpu(fcpcmd->fcpDl),
				be32_to_cpu(fcprsp->rspResId),
				fcpi_parm, cmnd->cmnd[0]);
		host_status = DID_ERROR;
		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
}

static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_vport *vport = pIocbIn->vport;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	uint32_t vpi = (lpfc_cmd->cur_iocbq.vport
			? lpfc_cmd->cur_iocbq.vport->vpi
			: 0);
	int result;
	struct scsi_device *sdev, *tmp_sdev;
	int depth = 0;

	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;

		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d (%d):0729 FCP cmd x%x failed <%d/%d> "
				"status: x%x result: x%x Data: x%x x%x\n",
				phba->brd_no, vpi, cmd->cmnd[0],
				cmd->device ? cmd->device->id : 0xffff,
				cmd->device ? cmd->device->lun : 0xffff,
				lpfc_cmd->status, lpfc_cmd->result,
				pIocbOut->iocb.ulpContext,
				lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = ScsiResult(DID_BUS_BUSY, 0);
			break;
		case IOSTAT_LOCAL_REJECT:
			if (lpfc_cmd->result == RJT_UNAVAIL_PERM ||
			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
			    lpfc_cmd->result == RJT_LOGIN_REQUIRED) {
				cmd->result = ScsiResult(DID_REQUEUE, 0);
				break;
			} /* else: fall through */
		default:
			cmd->result = ScsiResult(DID_ERROR, 0);
			break;
		}

		if ((pnode == NULL)
		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
	} else {
		cmd->result = ScsiResult(DID_OK, 0);
	}

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d (%d):0710 Iodone <%d/%d> cmd %p, error "
				"x%x SNS x%x x%x Data: x%x x%x\n",
				phba->brd_no, vpi, cmd->device->id,
				cmd->device->lun, cmd, cmd->result,
				*lp, *(lp + 3), cmd->retries,
				scsi_get_resid(cmd));
	}

	result = cmd->result;
	sdev = cmd->device;
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	cmd->scsi_done(cmd);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return;
	}

	if (!result)
		lpfc_rampup_queue_depth(phba, sdev);

	if (!result && pnode != NULL &&
	    ((jiffies - pnode->last_ramp_up_time) >
	     LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	    ((jiffies - pnode->last_q_full_time) >
	     LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	    (phba->cfg_lun_queue_depth > sdev->queue_depth)) {
		shost_for_each_device(tmp_sdev, sdev->host) {
			if (phba->cfg_lun_queue_depth > tmp_sdev->queue_depth) {
				if (tmp_sdev->id != sdev->id)
					continue;
				if (tmp_sdev->ordered_tags)
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_ORDERED_TAG,
						tmp_sdev->queue_depth+1);
				else
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_SIMPLE_TAG,
						tmp_sdev->queue_depth+1);

				pnode->last_ramp_up_time = jiffies;
			}
		}
	}

	/*
	 * Check for queue full. If the lun is reporting queue full, then
	 * back off the lun queue depth to prevent target overloads.
	 */
	if (result == SAM_STAT_TASK_SET_FULL && pnode != NULL) {
		pnode->last_q_full_time = jiffies;

		shost_for_each_device(tmp_sdev, sdev->host) {
			if (tmp_sdev->id != sdev->id)
				continue;
			depth = scsi_track_queue_full(tmp_sdev,
					tmp_sdev->queue_depth - 1);
		}
		/*
		 * The queue depth cannot be lowered any more.
		 * Modify the returned error code to store
		 * the final depth value set by
		 * scsi_track_queue_full.
		 */
		if (depth == -1)
			depth = sdev->host->cmd_per_lun;

		if (depth) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
					"%d (%d):0711 detected queue full - "
					"lun queue depth adjusted to %d.\n",
					phba->brd_no, vpi, depth);
		}
	}

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}

static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct lpfc_hba *phba = vport->phba;
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
		       &lpfc_cmd->fcp_cmnd->fcp_lun);

	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);

	if (scsi_cmnd->device->tagged_supported) {
		switch (scsi_cmnd->tag) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = ORDERED_Q;
			break;
		default:
			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
			break;
		}
	} else
		fcp_cmnd->fcpCntl1 = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither. Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		phba->fc4ControlRequests++;
	}

	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1 = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
	piocbq->vport = vport;
}

static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     unsigned int lun,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		return 0;
	}

	piocbq = &(lpfc_cmd->cur_iocbq);
	piocbq->vport = vport;

	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;

	piocb->ulpCommand = CMD_FCP_ICMND64_CR;

	piocb->ulpContext = ndlp->nlp_rpi;
	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		piocb->ulpFCP2Rcvy = 1;
	}
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else {
		piocb->ulpTimeout = lpfc_cmd->timeout;
	}

	return 1;
}

static void
lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
		      struct lpfc_iocbq *cmdiocbq,
		      struct lpfc_iocbq *rspiocbq)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) cmdiocbq->context1;
	if (lpfc_cmd)
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	return;
}

static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
		    unsigned tgt_id, unsigned int lun,
		    struct lpfc_rport_data *rdata)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	int ret;

	if (!rdata->pnode)
		return FAILED;

	lpfc_cmd->rdata = rdata;
	ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
					   FCP_TARGET_RESET);
	if (!ret)
		return FAILED;

	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);

	if (!iocbqrsp)
		return FAILED;

	/* Issue Target Reset to TGT <num> */
	lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
			"%d (%d):0702 Issue Target Reset to TGT %d "
			"Data: x%x x%x\n",
			phba->brd_no, vport->vpi, tgt_id,
			rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);

	ret = lpfc_sli_issue_iocb_wait(phba,
				       &phba->sli.ring[phba->sli.fcp_ring],
				       iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (ret != IOCB_SUCCESS) {
		if (ret == IOCB_TIMEDOUT)
			iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	} else {
		ret = SUCCESS;
		lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
		lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	}

	lpfc_sli_release_iocbq(phba, iocbqrsp);
	return ret;
}

const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int len;
	static char lpfcinfobuf[384];

	memset(lpfcinfobuf, 0, 384);
	if (phba && phba->pcidev) {
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			 384-len,
			 " on PCI bus %02x device %02x irq %d",
			 phba->pcidev->bus->number,
			 phba->pcidev->devfn,
			 phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384-len,
				 " port %s",
				 phba->Port);
		}
	}
	return lpfcinfobuf;
}

static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
{
	unsigned long poll_tmo_expires =
		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

	if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
		mod_timer(&phba->fcp_poll_timer,
			  poll_tmo_expires);
}

void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
	lpfc_poll_rearm_timer(phba);
}

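/*
 * Timer handler used when the driver runs the FCP ring in polled mode:
 * service any completed FCP ring entries and rearm the poll timer while
 * commands remain outstanding on the ring.
 */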
void lpfc_poll_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}
}

static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err;

	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp) {
		cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
		goto out_fail_command;
	}
	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL) {
		lpfc_adjust_queue_depth(phba);

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d (%d):0707 driver's buffer pool is empty, "
				"IO busied\n",
				phba->brd_no, vport->vpi);
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
	cmnd->scsi_done = done;

	err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);

	err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err)
		goto out_host_busy_free_buf;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;

 out_host_busy_free_buf:
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_fail_command:
	done(cmnd);
	return 0;
}

static void
lpfc_block_error_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

	spin_lock_irq(shost->host_lock);
	while (rport->port_state == FC_PORTSTATE_BLOCKED) {
		spin_unlock_irq(shost->host_lock);
		msleep(1000);
		spin_lock_irq(shost->host_lock);
	}
	spin_unlock_irq(shost->host_lock);
	return;
}

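/*
 * SCSI midlayer error-handler entry point for aborting a single command:
 * build an abort (or close, if the link is down) request for the
 * outstanding IOCB and wait for the firmware to complete the original I/O.
 */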
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	unsigned int loop_count = 0;
	int ret = SUCCESS;

	lpfc_block_error_handler(cmnd);
	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	BUG_ON(!lpfc_cmd);

	/*
	 * If the pCmd field of the corresponding lpfc_scsi_buf structure
	 * points to a different SCSI command, then the driver has already
	 * completed this command, but the midlayer did not see the
	 * completion before the eh fired. Just return SUCCESS.
	 */
	iocb = &lpfc_cmd->cur_iocbq;
	if (lpfc_cmd->pCmd != cmnd)
		goto out;

	BUG_ON(iocb->context1 != lpfc_cmd);

	abtsiocb = lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		goto out;
	}

	/*
	 * The SCSI command cannot be in the txq; it is in flight because
	 * pCmd is still pointing at the SCSI command we have to abort.
	 * There is no need to search the txcmplq. Just send an abort to
	 * the FW.
	 */

	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;
	if (lpfc_is_link_up(phba))
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	abtsiocb->vport = vport;
	if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_poll_fcp_ring(phba);

	/* Wait for abort to complete */
	while (lpfc_cmd->pCmd == cmnd) {
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_sli_poll_fcp_ring(phba);

		schedule_timeout_uninterruptible(LPFC_ABORT_WAIT * HZ);
		if (++loop_count
		    > (2 * phba->cfg_devloss_tmo)/LPFC_ABORT_WAIT)
			break;
	}

	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"%d (%d):0748 abort handler timed out waiting "
				"for abort to complete: ret %#x, ID %d, "
				"LUN %d, snum %#lx\n",
				phba->brd_no, vport->vpi, ret,
				cmnd->device->id, cmnd->device->lun,
				cmnd->serial_number);
	}

 out:
	lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
			"%d (%d):0749 SCSI Layer I/O Abort Request "
			"Status x%x ID %d LUN %d snum %#lx\n",
			phba->brd_no, vport->vpi, ret, cmnd->device->id,
			cmnd->device->lun, cmnd->serial_number);

	return ret;
}

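/*
 * SCSI midlayer device (LUN) reset handler: wait for the target to reach
 * the MAPPED state, issue an FCP target reset task management command,
 * then flush any I/O still outstanding on this LUN.
 */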
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq, *iocbqrsp;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	uint32_t cmd_result = 0, cmd_status = 0;
	int ret = FAILED;
	int iocb_status = IOCB_SUCCESS;
	int cnt, loopcnt;

	lpfc_block_error_handler(cmnd);
	loopcnt = 0;
	/*
	 * If target is not in a MAPPED state, delay the reset until
	 * target is rediscovered or devloss timeout expires.
	 */
	while (1) {
		if (!pnode)
			goto out;

		if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
			schedule_timeout_uninterruptible(msecs_to_jiffies(500));
			loopcnt++;
			rdata = cmnd->device->hostdata;
			if (!rdata ||
			    (loopcnt > ((phba->cfg_devloss_tmo * 2) + 1))) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
						"%d (%d):0721 LUN Reset rport "
						"failure: cnt x%x rdata x%p\n",
						phba->brd_no, vport->vpi,
						loopcnt, rdata);
				goto out;
			}
			pnode = rdata->pnode;
			if (!pnode)
				goto out;
		}
		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
			break;
	}

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		goto out;

	lpfc_cmd->timeout = 60;
	lpfc_cmd->rdata = rdata;

	ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, cmnd->device->lun,
					   FCP_TARGET_RESET);
	if (!ret)
		goto out_free_scsi_buf;

	iocbq = &lpfc_cmd->cur_iocbq;

	/* get a buffer for this IOCB command response */
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL)
		goto out_free_scsi_buf;

	lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
			"%d (%d):0703 Issue target reset to TGT %d LUN %d "
			"rpi x%x nlp_flag x%x\n",
			phba->brd_no, vport->vpi, cmnd->device->id,
			cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);

	iocb_status = lpfc_sli_issue_iocb_wait(phba,
				&phba->sli.ring[phba->sli.fcp_ring],
				iocbq, iocbqrsp, lpfc_cmd->timeout);

	if (iocb_status == IOCB_TIMEDOUT)
		iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;

	if (iocb_status == IOCB_SUCCESS)
		ret = SUCCESS;
	else
		ret = iocb_status;

	cmd_result = iocbqrsp->iocb.un.ulpWord[4];
	cmd_status = iocbqrsp->iocb.ulpStatus;

	lpfc_sli_release_iocbq(phba, iocbqrsp);

	/*
	 * All outstanding txcmplq I/Os should have been aborted by the device.
	 * Unfortunately, some targets do not abide by this forcing the driver
	 * to double check.
	 */
	cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
				cmnd->device->id, cmnd->device->lun,
				LPFC_CTX_LUN);
	if (cnt)
		lpfc_sli_abort_iocb(phba,
				    &phba->sli.ring[phba->sli.fcp_ring],
				    cmnd->device->id, cmnd->device->lun,
				    0, LPFC_CTX_LUN);
	loopcnt = 0;
	while (cnt) {
		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);

		if (++loopcnt
		    > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
			break;

		cnt = lpfc_sli_sum_iocb(phba,
					&phba->sli.ring[phba->sli.fcp_ring],
					cmnd->device->id, cmnd->device->lun,
					LPFC_CTX_LUN);
	}

	if (cnt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"%d (%d):0719 device reset I/O flush failure: "
				"cnt x%x\n",
				phba->brd_no, vport->vpi, cnt);
		ret = FAILED;
	}

 out_free_scsi_buf:
	if (iocb_status != IOCB_TIMEDOUT) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	}
	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"%d (%d):0713 SCSI layer issued device reset (%d, %d) "
			"return x%x status x%x result x%x\n",
			phba->brd_no, vport->vpi, cmnd->device->id,
			cmnd->device->lun, ret, cmd_status, cmd_result);

 out:
	return ret;
}

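/*
 * SCSI midlayer bus reset handler: issue a target reset to every mapped
 * target known to the driver, then flush all I/O still outstanding on the
 * host before reporting success or failure to the midlayer.
 */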
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp = NULL;
	int match;
	int ret = FAILED, i, err_count = 0;
	int cnt, loopcnt;
	struct lpfc_scsi_buf * lpfc_cmd;

	lpfc_block_error_handler(cmnd);

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		goto out;

	/* The lpfc_cmd storage is reused. Set all loop invariants. */
	lpfc_cmd->timeout = 60;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver. Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for mapped node by target ID */
		match = 0;
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    i == ndlp->nlp_sid &&
			    ndlp->rport) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;

		ret = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
					  cmnd->device->lun,
					  ndlp->rport->dd_data);
		if (ret != SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"%d (%d):0700 Bus Reset on target %d "
					"failed\n",
					phba->brd_no, vport->vpi, i);
			err_count++;
			break;
		}
	}

	if (ret != IOCB_TIMEDOUT)
		lpfc_release_scsi_buf(phba, lpfc_cmd);

	if (err_count == 0)
		ret = SUCCESS;
	else
		ret = FAILED;

	/*
	 * All outstanding txcmplq I/Os should have been aborted by
	 * the targets. Unfortunately, some targets do not abide by
	 * this forcing the driver to double check.
	 */
	cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
				0, 0, LPFC_CTX_HOST);
	if (cnt)
		lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
				    0, 0, 0, LPFC_CTX_HOST);
	loopcnt = 0;
	while (cnt) {
		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);

		if (++loopcnt
		    > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
			break;

		cnt = lpfc_sli_sum_iocb(phba,
					&phba->sli.ring[phba->sli.fcp_ring],
					0, 0, LPFC_CTX_HOST);
	}

	if (cnt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"%d (%d):0715 Bus Reset I/O flush failure: "
				"cnt x%x left x%x\n",
				phba->brd_no, vport->vpi, cnt, i);
		ret = FAILED;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"%d (%d):0714 SCSI layer issued Bus Reset Data: x%x\n",
			phba->brd_no, vport->vpi, ret);
 out:
	return ret;
}

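/*
 * Called by the midlayer when a new SCSI device is created: attach the
 * remote-port data to the sdev and grow the driver's global pool of
 * pre-allocated command buffers, staying under the HBA queue depth limit.
 */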
" 1409 "Reducing allocation request to %d.\n", 1410 phba->brd_no, vport->vpi, num_to_alloc, 1411 phba->cfg_hba_queue_depth, 1412 (phba->cfg_hba_queue_depth - total)); 1413 num_to_alloc = phba->cfg_hba_queue_depth - total; 1414 } 1415 1416 for (i = 0; i < num_to_alloc; i++) { 1417 scsi_buf = lpfc_new_scsi_buf(vport); 1418 if (!scsi_buf) { 1419 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1420 "%d (%d):0706 Failed to allocate " 1421 "command buffer\n", 1422 phba->brd_no, vport->vpi); 1423 break; 1424 } 1425 1426 spin_lock_irqsave(&phba->scsi_buf_list_lock, flags); 1427 phba->total_scsi_bufs++; 1428 list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list); 1429 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags); 1430 } 1431 return 0; 1432 } 1433 1434 static int 1435 lpfc_slave_configure(struct scsi_device *sdev) 1436 { 1437 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; 1438 struct lpfc_hba *phba = vport->phba; 1439 struct fc_rport *rport = starget_to_rport(sdev->sdev_target); 1440 1441 if (sdev->tagged_supported) 1442 scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth); 1443 else 1444 scsi_deactivate_tcq(sdev, phba->cfg_lun_queue_depth); 1445 1446 /* 1447 * Initialize the fc transport attributes for the target 1448 * containing this scsi device. Also note that the driver's 1449 * target pointer is stored in the starget_data for the 1450 * driver's sysfs entry point functions. 1451 */ 1452 rport->dev_loss_tmo = phba->cfg_devloss_tmo; 1453 1454 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 1455 lpfc_sli_poll_fcp_ring(phba); 1456 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 1457 lpfc_poll_rearm_timer(phba); 1458 } 1459 1460 return 0; 1461 } 1462 1463 static void 1464 lpfc_slave_destroy(struct scsi_device *sdev) 1465 { 1466 sdev->hostdata = NULL; 1467 return; 1468 } 1469 1470 1471 struct scsi_host_template lpfc_template = { 1472 .module = THIS_MODULE, 1473 .name = LPFC_DRIVER_NAME, 1474 .info = lpfc_info, 1475 .queuecommand = lpfc_queuecommand, 1476 .eh_abort_handler = lpfc_abort_handler, 1477 .eh_device_reset_handler= lpfc_device_reset_handler, 1478 .eh_bus_reset_handler = lpfc_bus_reset_handler, 1479 .slave_alloc = lpfc_slave_alloc, 1480 .slave_configure = lpfc_slave_configure, 1481 .slave_destroy = lpfc_slave_destroy, 1482 .scan_finished = lpfc_scan_finished, 1483 .this_id = -1, 1484 .sg_tablesize = LPFC_SG_SEG_CNT, 1485 .cmd_per_lun = LPFC_CMD_PER_LUN, 1486 .use_clustering = ENABLE_CLUSTERING, 1487 .shost_attrs = lpfc_hba_attrs, 1488 .max_sectors = 0xFFFF, 1489 }; 1490