/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                   *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                  *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2


/*
 * This routine allocates a scsi buffer, which contains all the necessary
 * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
 * contains information to build the IOCB.  The DMAable region contains
 * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
 * and the BPL BDE is setup in the IOCB.
 */
static struct lpfc_scsi_buf *
lpfc_new_scsi_buf(struct lpfc_hba * phba)
{
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys;
	uint16_t iotag;

	psb = kmalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
	if (!psb)
		return NULL;
	memset(psb, 0, sizeof (struct lpfc_scsi_buf));
	psb->scsi_hba = phba;

	/*
	 * Get memory from the pci pool to map the virt space to pci bus space
	 * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
	 * struct fcp_rsp and the number of bde's necessary to support the
	 * sg_tablesize.
	 */
	psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
				   &psb->dma_handle);
	if (!psb->data) {
		kfree(psb);
		return NULL;
	}

	/* Initialize virtual ptrs to dma_buf region. */
	memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

	/* Allocate iotag for psb->cur_iocbq. */
	iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
	if (iotag == 0) {
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
			      psb->data, psb->dma_handle);
		kfree (psb);
		return NULL;
	}
	psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

	psb->fcp_cmnd = psb->data;
	psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
	psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

	/* Initialize local short-hand pointers. */
	bpl = psb->fcp_bpl;
	pdma_phys = psb->dma_handle;

	/*
	 * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
	 * list bdes.  Initialize the first two and leave the rest for
	 * queuecommand.
	 */
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
	bpl->tus.f.bdeFlags = BUFF_USE_CMND;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	/* Setup the physical region for the FCP RSP */
	pdma_phys += sizeof (struct fcp_cmnd);
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
	bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	/*
	 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
	 * initialize it with all known data now.
	 */
	pdma_phys += (sizeof (struct fcp_rsp));
	iocb = &psb->cur_iocbq.iocb;
	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
	iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
	iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
	iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
	iocb->ulpBdeCount = 1;
	iocb->ulpClass = CLASS3;

	return psb;
}

static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba * phba)
{
	struct lpfc_scsi_buf * lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd) {
		lpfc_cmd->seg_cnt = 0;
		lpfc_cmd->nonsg_phys = 0;
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return lpfc_cmd;
}

static void
lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
{
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	psb->pCmd = NULL;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}

static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t i, num_bde = 0;
	int datadir = scsi_cmnd->sc_data_direction;
	int dma_error;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_cmnd->use_sg) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */
		sgel = (struct scatterlist *)scsi_cmnd->request_buffer;
		lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel,
						scsi_cmnd->use_sg, datadir);
		if (lpfc_cmd->seg_cnt == 0)
			return 1;

		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			printk(KERN_ERR "%s: Too many sg segments from "
			       "dma_map_sg. Config %d, seg_cnt %d",
			       __FUNCTION__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			dma_unmap_sg(&phba->pcidev->dev, sgel,
				     lpfc_cmd->seg_cnt, datadir);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment
		 * count during probe that limits the number of sg elements
		 * in any single scsi command.  Just run through the seg_cnt
		 * and format the bde's.
		 */
		for (i = 0; i < lpfc_cmd->seg_cnt; i++) {
			physaddr = sg_dma_address(sgel);
			bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
			bpl->tus.f.bdeSize = sg_dma_len(sgel);
			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = 0;
			else
				bpl->tus.f.bdeFlags = BUFF_USE_RCV;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);
			bpl++;
			sgel++;
			num_bde++;
		}
	} else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
		physaddr = dma_map_single(&phba->pcidev->dev,
					  scsi_cmnd->request_buffer,
					  scsi_cmnd->request_bufflen,
					  datadir);
		dma_error = dma_mapping_error(physaddr);
		if (dma_error) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"%d:0718 Unable to dma_map_single "
					"request_buffer: x%x\n",
					phba->brd_no, dma_error);
			return 1;
		}

		lpfc_cmd->nonsg_phys = physaddr;
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen;
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = 0;
		else
			bpl->tus.f.bdeFlags = BUFF_USE_RCV;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		num_bde = 1;
		bpl++;
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
	 * reinitialized since all iocb memory resources are used many times
	 * for transmit, receive, and continuation bpl's.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize +=
		(num_bde * sizeof (struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;
	fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen);
	return 0;
}

static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
{
	/*
	 * There are only two special cases to consider.  (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg.  There is a third
	 * case, but it does not require resource deallocation.
	 */
	if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
		dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
				psb->seg_cnt, psb->pCmd->sc_data_direction);
	} else {
		if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
			dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
					 psb->pCmd->request_bufflen,
					 psb->pCmd->sc_data_direction);
		}
	}
}

static void
lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	struct lpfc_hba *phba = lpfc_cmd->scsi_hba;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t *lp;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;

	/*
	 * If this is a task management command, there is no
	 * scsi packet associated with this lpfc_cmd.  The driver
	 * consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		if (resp_info & RSP_LEN_VALID)
			rsplen = be32_to_cpu(fcprsp->rspRspLen);
		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}
	lp = (uint32_t *)cmnd->sense_buffer;

	if (!scsi_status && (resp_info & RESID_UNDER))
		logit = LOG_FCP;

	lpfc_printf_log(phba, KERN_WARNING, logit,
			"%d:0730 FCP command x%x failed: x%x SNS x%x x%x "
			"Data: x%x x%x x%x x%x x%x\n",
			phba->brd_no, cmnd->cmnd[0], scsi_status,
			be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
			be32_to_cpu(fcprsp->rspResId),
			be32_to_cpu(fcprsp->rspSnsLen),
			be32_to_cpu(fcprsp->rspRspLen),
			fcprsp->rspInfo3);

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
		    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
			host_status = DID_ERROR;
			goto out;
		}
	}

	cmnd->resid = 0;
	if (resp_info & RESID_UNDER) {
		cmnd->resid = be32_to_cpu(fcprsp->rspResId);

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0716 FCP Read Underrun, expected %d, "
				"residual %d Data: x%x x%x x%x\n", phba->brd_no,
				be32_to_cpu(fcpcmd->fcpDl), cmnd->resid,
				fcpi_parm, cmnd->cmnd[0], cmnd->underflow);

		/*
		 * If there is an underrun, check whether the underrun
		 * reported by the storage array matches the underrun
		 * reported by the HBA.  If they differ, a frame was dropped.
		 */
		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		    fcpi_parm &&
		    (cmnd->resid != fcpi_parm)) {
			lpfc_printf_log(phba, KERN_WARNING,
				LOG_FCP | LOG_FCP_ERROR,
				"%d:0735 FCP Read Check Error and Underrun "
				"Data: x%x x%x x%x x%x\n", phba->brd_no,
				be32_to_cpu(fcpcmd->fcpDl),
				cmnd->resid,
				fcpi_parm, cmnd->cmnd[0]);
			cmnd->resid = cmnd->request_bufflen;
			host_status = DID_ERROR;
		}
		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is
		 * at least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) {
			lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
					"%d:0717 FCP command x%x residual "
					"underrun converted to error "
					"Data: x%x x%x x%x\n", phba->brd_no,
					cmnd->cmnd[0], cmnd->request_bufflen,
					cmnd->resid, cmnd->underflow);

			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0720 FCP command x%x residual "
				"overrun error. Data: x%x x%x \n",
				phba->brd_no, cmnd->cmnd[0],
				cmnd->request_bufflen, cmnd->resid);
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero). Apply check only to reads.
	 */
	} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
		   (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
				"%d:0734 FCP Read Check Error Data: "
				"x%x x%x x%x x%x\n", phba->brd_no,
				be32_to_cpu(fcpcmd->fcpDl),
				be32_to_cpu(fcprsp->rspResId),
				fcpi_parm, cmnd->cmnd[0]);
		host_status = DID_ERROR;
		cmnd->resid = cmnd->request_bufflen;
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
}

static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	int result;
	struct scsi_device *sdev, *tmp_sdev;
	int depth = 0;

	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;

		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0729 FCP cmd x%x failed <%d/%d> status: "
				"x%x result: x%x Data: x%x x%x\n",
				phba->brd_no, cmd->cmnd[0], cmd->device->id,
				cmd->device->lun, lpfc_cmd->status,
				lpfc_cmd->result, pIocbOut->iocb.ulpContext,
				lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(lpfc_cmd, pIocbOut);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = ScsiResult(DID_BUS_BUSY, 0);
			break;
		default:
			cmd->result = ScsiResult(DID_ERROR, 0);
			break;
		}

		if ((pnode == NULL)
		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
	} else {
		cmd->result = ScsiResult(DID_OK, 0);
	}

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0710 Iodone <%d/%d> cmd %p, error x%x "
				"SNS x%x x%x Data: x%x x%x\n",
				phba->brd_no, cmd->device->id,
				cmd->device->lun, cmd, cmd->result,
				*lp, *(lp + 3), cmd->retries, cmd->resid);
	}

	result = cmd->result;
	sdev = cmd->device;
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	cmd->scsi_done(cmd);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return;
	}

	if (!result && pnode != NULL &&
	   ((jiffies - pnode->last_ramp_up_time) >
		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	   ((jiffies - pnode->last_q_full_time) >
		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	   (phba->cfg_lun_queue_depth > sdev->queue_depth)) {
		shost_for_each_device(tmp_sdev, sdev->host) {
			if (phba->cfg_lun_queue_depth > tmp_sdev->queue_depth) {
				if (tmp_sdev->id != sdev->id)
					continue;
				if (tmp_sdev->ordered_tags)
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_ORDERED_TAG,
						tmp_sdev->queue_depth+1);
				else
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_SIMPLE_TAG,
						tmp_sdev->queue_depth+1);

				pnode->last_ramp_up_time = jiffies;
			}
		}
	}

	/*
	 * Check for queue full.  If the lun is reporting queue full, then
	 * back off the lun queue depth to prevent target overloads.
	 */
	if (result == SAM_STAT_TASK_SET_FULL && pnode != NULL) {
		pnode->last_q_full_time = jiffies;

		shost_for_each_device(tmp_sdev, sdev->host) {
			if (tmp_sdev->id != sdev->id)
				continue;
			depth = scsi_track_queue_full(tmp_sdev,
					tmp_sdev->queue_depth - 1);
		}
		/*
		 * The queue depth cannot be lowered any more.
		 * Modify the returned error code to store
		 * the final depth value set by
		 * scsi_track_queue_full.
		 */
		if (depth == -1)
			depth = sdev->host->cmd_per_lun;

		if (depth) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0711 detected queue full - lun queue depth "
				" adjusted to %d.\n", phba->brd_no, depth);
		}
	}

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}

static void
lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
			&lpfc_cmd->fcp_cmnd->fcp_lun);

	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);

	if (scsi_cmnd->device->tagged_supported) {
		switch (scsi_cmnd->tag) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = ORDERED_Q;
			break;
		default:
			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
			break;
		}
	} else
		fcp_cmnd->fcpCntl1 = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_cmnd->use_sg) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm =
				scsi_cmnd->request_bufflen;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm =
				scsi_cmnd->request_bufflen;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		phba->fc4ControlRequests++;
	}

	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1 = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
}

static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     unsigned int lun,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_sli *psli;
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		return 0;
	}

	psli = &phba->sli;
	piocbq = &(lpfc_cmd->cur_iocbq);
	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;

	piocb->ulpCommand = CMD_FCP_ICMND64_CR;

	piocb->ulpContext = ndlp->nlp_rpi;
	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		piocb->ulpFCP2Rcvy = 1;
	}
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else {
		piocb->ulpTimeout = lpfc_cmd->timeout;
	}

	return (1);
}

static void
lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) cmdiocbq->context1;
	if (lpfc_cmd)
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	return;
}

static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
		    unsigned tgt_id, unsigned int lun,
		    struct lpfc_rport_data *rdata)
{
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	int ret;

	if (!rdata->pnode)
		return FAILED;

	lpfc_cmd->rdata = rdata;
	ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, lun,
					   FCP_TARGET_RESET);
	if (!ret)
		return FAILED;

	lpfc_cmd->scsi_hba = phba;
	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);

	if (!iocbqrsp)
		return FAILED;

	/* Issue Target Reset to TGT <num> */
	lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
			"%d:0702 Issue Target Reset to TGT %d "
			"Data: x%x x%x\n",
			phba->brd_no, tgt_id, rdata->pnode->nlp_rpi,
			rdata->pnode->nlp_flag);

	ret = lpfc_sli_issue_iocb_wait(phba,
				       &phba->sli.ring[phba->sli.fcp_ring],
				       iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (ret != IOCB_SUCCESS) {
		if (ret == IOCB_TIMEDOUT)
			iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	} else {
		ret = SUCCESS;
		lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
		lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	}

	lpfc_sli_release_iocbq(phba, iocbqrsp);
	return ret;
}

const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata;
	int len;
	static char lpfcinfobuf[384];

	memset(lpfcinfobuf, 0, 384);
	if (phba && phba->pcidev){
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			 384-len,
			 " on PCI bus %02x device %02x irq %d",
			 phba->pcidev->bus->number,
			 phba->pcidev->devfn,
			 phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384-len,
				 " port %s",
				 phba->Port);
		}
	}
	return lpfcinfobuf;
}

static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
{
	unsigned long poll_tmo_expires =
		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

	if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
		mod_timer(&phba->fcp_poll_timer,
			  poll_tmo_expires);
}

void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
	lpfc_poll_rearm_timer(phba);
}

void lpfc_poll_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
	unsigned long iflag;

	spin_lock_irqsave(phba->host->host_lock, iflag);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring (phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	spin_unlock_irqrestore(phba->host->host_lock, iflag);
}

static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
	struct lpfc_hba *phba =
		(struct lpfc_hba *) cmnd->device->host->hostdata;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err;

	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp) {
		cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
		goto out_fail_command;
	}
	lpfc_cmd = lpfc_get_scsi_buf (phba);
	if (lpfc_cmd == NULL) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0707 driver's buffer pool is empty, "
				"IO busied\n", phba->brd_no);
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
	cmnd->scsi_done = done;

	err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(phba, lpfc_cmd, ndlp);

	err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err)
		goto out_host_busy_free_buf;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;

 out_host_busy_free_buf:
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_fail_command:
	done(cmnd);
	return 0;
}

static void
lpfc_block_error_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

	spin_lock_irq(shost->host_lock);
	while (rport->port_state == FC_PORTSTATE_BLOCKED) {
		spin_unlock_irq(shost->host_lock);
		msleep(1000);
		spin_lock_irq(shost->host_lock);
	}
	spin_unlock_irq(shost->host_lock);
	return;
}

static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	unsigned int loop_count = 0;
	int ret = SUCCESS;

	lpfc_block_error_handler(cmnd);
	spin_lock_irq(shost->host_lock);

	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	BUG_ON(!lpfc_cmd);

	/*
	 * If pCmd field of the corresponding lpfc_scsi_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, but the midlayer did not
	 * see the completion before the eh fired.  Just return
	 * SUCCESS.
	 */
	iocb = &lpfc_cmd->cur_iocbq;
	if (lpfc_cmd->pCmd != cmnd)
		goto out;

	BUG_ON(iocb->context1 != lpfc_cmd);

	abtsiocb = lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		goto out;
	}

	/*
	 * The scsi command cannot be in the txq, and it is in flight, because
	 * pCmd is still pointing at the SCSI command we have to abort.  There
	 * is no need to search the txcmplq.  Just send an abort to the FW.
	 */

	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;
	if (phba->hba_state >= LPFC_LINK_UP)
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_poll_fcp_ring (phba);

	/* Wait for abort to complete */
	while (lpfc_cmd->pCmd == cmnd)
	{
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_sli_poll_fcp_ring (phba);

		spin_unlock_irq(phba->host->host_lock);
		schedule_timeout_uninterruptible(LPFC_ABORT_WAIT*HZ);
		spin_lock_irq(phba->host->host_lock);
		if (++loop_count
		    > (2 * phba->cfg_devloss_tmo)/LPFC_ABORT_WAIT)
			break;
	}

	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"%d:0748 abort handler timed out waiting for "
				"abort to complete: ret %#x, ID %d, LUN %d, "
				"snum %#lx\n",
				phba->brd_no, ret, cmnd->device->id,
				cmnd->device->lun, cmnd->serial_number);
	}

 out:
	lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
			"%d:0749 SCSI Layer I/O Abort Request "
			"Status x%x ID %d LUN %d snum %#lx\n",
			phba->brd_no, ret, cmnd->device->id,
			cmnd->device->lun, cmnd->serial_number);

	spin_unlock_irq(shost->host_lock);

	return ret;
}

static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq, *iocbqrsp;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	uint32_t cmd_result = 0, cmd_status = 0;
	int ret = FAILED;
	int iocb_status = IOCB_SUCCESS;
	int cnt, loopcnt;

	lpfc_block_error_handler(cmnd);
	spin_lock_irq(shost->host_lock);
	loopcnt = 0;
	/*
	 * If target is not in a MAPPED state, delay the reset until
	 * target is rediscovered or devloss timeout expires.
	 */
	while ( 1 ) {
		if (!pnode)
			goto out;

		if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
			spin_unlock_irq(phba->host->host_lock);
			schedule_timeout_uninterruptible(msecs_to_jiffies(500));
			spin_lock_irq(phba->host->host_lock);
			loopcnt++;
			rdata = cmnd->device->hostdata;
			if (!rdata ||
			    (loopcnt > ((phba->cfg_devloss_tmo * 2) + 1))) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
						"%d:0721 LUN Reset rport failure:"
						" cnt x%x rdata x%p\n",
						phba->brd_no, loopcnt, rdata);
				goto out;
			}
			pnode = rdata->pnode;
			if (!pnode)
				goto out;
		}
		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
			break;
	}

	lpfc_cmd = lpfc_get_scsi_buf (phba);
	if (lpfc_cmd == NULL)
		goto out;

	lpfc_cmd->timeout = 60;
	lpfc_cmd->scsi_hba = phba;
	lpfc_cmd->rdata = rdata;

	ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, cmnd->device->lun,
					   FCP_TARGET_RESET);
	if (!ret)
		goto out_free_scsi_buf;

	iocbq = &lpfc_cmd->cur_iocbq;

	/* get a buffer for this IOCB command response */
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL)
		goto out_free_scsi_buf;

	lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
			"%d:0703 Issue target reset to TGT %d LUN %d rpi x%x "
			"nlp_flag x%x\n", phba->brd_no, cmnd->device->id,
			cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);

	iocb_status = lpfc_sli_issue_iocb_wait(phba,
				&phba->sli.ring[phba->sli.fcp_ring],
				iocbq, iocbqrsp, lpfc_cmd->timeout);

	if (iocb_status == IOCB_TIMEDOUT)
		iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;

	if (iocb_status == IOCB_SUCCESS)
		ret = SUCCESS;
	else
		ret = iocb_status;

	cmd_result = iocbqrsp->iocb.un.ulpWord[4];
	cmd_status = iocbqrsp->iocb.ulpStatus;

	lpfc_sli_release_iocbq(phba, iocbqrsp);

	/*
	 * All outstanding txcmplq I/Os should have been aborted by the device.
	 * Unfortunately, some targets do not abide by this forcing the driver
	 * to double check.
	 */
	cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
				cmnd->device->id, cmnd->device->lun,
				LPFC_CTX_LUN);
	if (cnt)
		lpfc_sli_abort_iocb(phba,
				    &phba->sli.ring[phba->sli.fcp_ring],
				    cmnd->device->id, cmnd->device->lun,
				    0, LPFC_CTX_LUN);
	loopcnt = 0;
	while(cnt) {
		spin_unlock_irq(phba->host->host_lock);
		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
		spin_lock_irq(phba->host->host_lock);

		if (++loopcnt
		    > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
			break;

		cnt = lpfc_sli_sum_iocb(phba,
					&phba->sli.ring[phba->sli.fcp_ring],
					cmnd->device->id, cmnd->device->lun,
					LPFC_CTX_LUN);
	}

	if (cnt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"%d:0719 device reset I/O flush failure: cnt x%x\n",
			phba->brd_no, cnt);
		ret = FAILED;
	}

 out_free_scsi_buf:
	if (iocb_status != IOCB_TIMEDOUT) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	}
	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"%d:0713 SCSI layer issued device reset (%d, %d) "
			"return x%x status x%x result x%x\n",
			phba->brd_no, cmnd->device->id, cmnd->device->lun,
			ret, cmd_status, cmd_result);

 out:
	spin_unlock_irq(shost->host_lock);
	return ret;
}

static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
	struct lpfc_nodelist *ndlp = NULL;
	int match;
	int ret = FAILED, i, err_count = 0;
	int cnt, loopcnt;
	struct lpfc_scsi_buf * lpfc_cmd;

	lpfc_block_error_handler(cmnd);
	spin_lock_irq(shost->host_lock);

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		goto out;

	/* The lpfc_cmd storage is reused.  Set all loop invariants. */
	lpfc_cmd->timeout = 60;
	lpfc_cmd->scsi_hba = phba;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for mapped node by target ID */
		match = 0;
		list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) {
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    i == ndlp->nlp_sid &&
			    ndlp->rport) {
				match = 1;
				break;
			}
		}
		if (!match)
			continue;

		ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, i, cmnd->device->lun,
					  ndlp->rport->dd_data);
		if (ret != SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"%d:0700 Bus Reset on target %d failed\n",
				phba->brd_no, i);
			err_count++;
			break;
		}
	}

	if (ret != IOCB_TIMEDOUT)
		lpfc_release_scsi_buf(phba, lpfc_cmd);

	if (err_count == 0)
		ret = SUCCESS;
	else
		ret = FAILED;

	/*
	 * All outstanding txcmplq I/Os should have been aborted by
	 * the targets.  Unfortunately, some targets do not abide by
	 * this forcing the driver to double check.
	 */
	cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
				0, 0, LPFC_CTX_HOST);
	if (cnt)
		lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
				    0, 0, 0, LPFC_CTX_HOST);
	loopcnt = 0;
	while(cnt) {
		spin_unlock_irq(phba->host->host_lock);
		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
		spin_lock_irq(phba->host->host_lock);

		if (++loopcnt
		    > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
			break;

		cnt = lpfc_sli_sum_iocb(phba,
					&phba->sli.ring[phba->sli.fcp_ring],
					0, 0, LPFC_CTX_HOST);
	}

	if (cnt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n",
			phba->brd_no, cnt, i);
		ret = FAILED;
	}

	lpfc_printf_log(phba,
			KERN_ERR,
			LOG_FCP,
			"%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
			phba->brd_no, ret);
 out:
	spin_unlock_irq(shost->host_lock);
	return ret;
}

static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata;
	struct lpfc_scsi_buf *scsi_buf = NULL;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0, i;
	uint32_t num_to_alloc = 0;
	unsigned long flags;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = rport->dd_data;

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers.  Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure.  The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra.  This list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = phba->cfg_lun_queue_depth + 2;
	if (total >= phba->cfg_hba_queue_depth) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0704 At limitation of %d preallocated "
				"command buffers\n", phba->brd_no, total);
		return 0;
	} else if (total + num_to_alloc > phba->cfg_hba_queue_depth) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0705 Allocation request of %d command "
				"buffers will exceed max of %d.  Reducing "
				"allocation request to %d.\n", phba->brd_no,
				num_to_alloc, phba->cfg_hba_queue_depth,
				(phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}

	for (i = 0; i < num_to_alloc; i++) {
		scsi_buf = lpfc_new_scsi_buf(phba);
		if (!scsi_buf) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"%d:0706 Failed to allocate command "
					"buffer\n", phba->brd_no);
			break;
		}

		spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
		phba->total_scsi_bufs++;
		list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
	}
	return 0;
}

static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) sdev->host->hostdata;
	struct fc_rport *rport = starget_to_rport(sdev->sdev_target);

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth);
	else
		scsi_deactivate_tcq(sdev, phba->cfg_lun_queue_depth);

	/*
	 * Initialize the fc transport attributes for the target
	 * containing this scsi device.  Also note that the driver's
	 * target pointer is stored in the starget_data for the
	 * driver's sysfs entry point functions.
	 */
	rport->dev_loss_tmo = phba->cfg_devloss_tmo;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}

static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
	return;
}

struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler= lpfc_device_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.scan_start		= lpfc_scan_start,
	.this_id		= -1,
	.sg_tablesize		= LPFC_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_host_attrs,
	.max_sectors		= 0xFFFF,
};