1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 8 * * 9 * This program is free software; you can redistribute it and/or * 10 * modify it under the terms of version 2 of the GNU General * 11 * Public License as published by the Free Software Foundation. * 12 * This program is distributed in the hope that it will be useful. * 13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 17 * TO BE LEGALLY INVALID. See the GNU General Public License for * 18 * more details, a copy of which can be found in the file COPYING * 19 * included with this package. * 20 *******************************************************************/ 21 22 #include <linux/blkdev.h> 23 #include <linux/delay.h> 24 #include <linux/dma-mapping.h> 25 #include <linux/idr.h> 26 #include <linux/interrupt.h> 27 #include <linux/kthread.h> 28 #include <linux/pci.h> 29 #include <linux/spinlock.h> 30 #include <linux/ctype.h> 31 32 #include <scsi/scsi.h> 33 #include <scsi/scsi_device.h> 34 #include <scsi/scsi_host.h> 35 #include <scsi/scsi_transport_fc.h> 36 37 #include "lpfc_hw.h" 38 #include "lpfc_sli.h" 39 #include "lpfc_disc.h" 40 #include "lpfc_scsi.h" 41 #include "lpfc.h" 42 #include "lpfc_logmsg.h" 43 #include "lpfc_crtn.h" 44 #include "lpfc_vport.h" 45 #include "lpfc_version.h" 46 47 static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); 48 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 49 static int lpfc_post_rcv_buf(struct lpfc_hba *); 50 51 static struct scsi_transport_template *lpfc_transport_template = NULL; 52 static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 53 static DEFINE_IDR(lpfc_hba_index); 54 55 /************************************************************************/ 56 /* */ 57 /* lpfc_config_port_prep */ 58 /* This routine will do LPFC initialization prior to the */ 59 /* CONFIG_PORT mailbox command. This will be initialized */ 60 /* as a SLI layer callback routine. */ 61 /* This routine returns 0 on success or -ERESTART if it wants */ 62 /* the SLI layer to reset the HBA and try again. Any */ 63 /* other return value indicates an error. */ 64 /* */ 65 /************************************************************************/ 66 int 67 lpfc_config_port_prep(struct lpfc_hba *phba) 68 { 69 lpfc_vpd_t *vp = &phba->vpd; 70 int i = 0, rc; 71 LPFC_MBOXQ_t *pmb; 72 MAILBOX_t *mb; 73 char *lpfc_vpd_data = NULL; 74 uint16_t offset = 0; 75 static char licensed[56] = 76 "key unlock for use with gnu public licensed code only\0"; 77 static int init_key = 1; 78 79 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 80 if (!pmb) { 81 phba->link_state = LPFC_HBA_ERROR; 82 return -ENOMEM; 83 } 84 85 mb = &pmb->mb; 86 phba->link_state = LPFC_INIT_MBX_CMDS; 87 88 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 89 if (init_key) { 90 uint32_t *ptext = (uint32_t *) licensed; 91 92 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 93 *ptext = cpu_to_be32(*ptext); 94 init_key = 0; 95 } 96 97 lpfc_read_nv(phba, pmb); 98 memset((char*)mb->un.varRDnvp.rsvd3, 0, 99 sizeof (mb->un.varRDnvp.rsvd3)); 100 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, 101 sizeof (licensed)); 102 103 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 104 105 if (rc != MBX_SUCCESS) { 106 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 107 "0324 Config Port initialization " 108 "error, mbxCmd x%x READ_NVPARM, " 109 "mbxStatus x%x\n", 110 mb->mbxCommand, mb->mbxStatus); 111 mempool_free(pmb, phba->mbox_mem_pool); 112 return -ERESTART; 113 } 114 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, 115 sizeof(phba->wwnn)); 116 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, 117 sizeof(phba->wwpn)); 118 } 119 120 phba->sli3_options = 0x0; 121 122 /* Setup and issue mailbox READ REV command */ 123 lpfc_read_rev(phba, pmb); 124 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 125 if (rc != MBX_SUCCESS) { 126 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 127 "0439 Adapter failed to init, mbxCmd x%x " 128 "READ_REV, mbxStatus x%x\n", 129 mb->mbxCommand, mb->mbxStatus); 130 mempool_free( pmb, phba->mbox_mem_pool); 131 return -ERESTART; 132 } 133 134 135 /* 136 * The value of rr must be 1 since the driver set the cv field to 1. 137 * This setting requires the FW to set all revision fields. 138 */ 139 if (mb->un.varRdRev.rr == 0) { 140 vp->rev.rBit = 0; 141 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 142 "0440 Adapter failed to init, READ_REV has " 143 "missing revision information.\n"); 144 mempool_free(pmb, phba->mbox_mem_pool); 145 return -ERESTART; 146 } 147 148 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) 149 return -EINVAL; 150 151 /* Save information as VPD data */ 152 vp->rev.rBit = 1; 153 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t)); 154 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev; 155 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16); 156 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev; 157 memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16); 158 vp->rev.biuRev = mb->un.varRdRev.biuRev; 159 vp->rev.smRev = mb->un.varRdRev.smRev; 160 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev; 161 vp->rev.endecRev = mb->un.varRdRev.endecRev; 162 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh; 163 vp->rev.fcphLow = mb->un.varRdRev.fcphLow; 164 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh; 165 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow; 166 vp->rev.postKernRev = mb->un.varRdRev.postKernRev; 167 vp->rev.opFwRev = mb->un.varRdRev.opFwRev; 168 169 /* If the sli feature level is less then 9, we must 170 * tear down all RPIs and VPIs on link down if NPIV 171 * is enabled. 172 */ 173 if (vp->rev.feaLevelHigh < 9) 174 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN; 175 176 if (lpfc_is_LC_HBA(phba->pcidev->device)) 177 memcpy(phba->RandomData, (char *)&mb->un.varWords[24], 178 sizeof (phba->RandomData)); 179 180 /* Get adapter VPD information */ 181 pmb->context2 = kmalloc(DMP_RSP_SIZE, GFP_KERNEL); 182 if (!pmb->context2) 183 goto out_free_mbox; 184 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL); 185 if (!lpfc_vpd_data) 186 goto out_free_context2; 187 188 do { 189 lpfc_dump_mem(phba, pmb, offset); 190 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 191 192 if (rc != MBX_SUCCESS) { 193 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 194 "0441 VPD not present on adapter, " 195 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n", 196 mb->mbxCommand, mb->mbxStatus); 197 mb->un.varDmp.word_cnt = 0; 198 } 199 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) 200 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; 201 lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset, 202 mb->un.varDmp.word_cnt); 203 offset += mb->un.varDmp.word_cnt; 204 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); 205 lpfc_parse_vpd(phba, lpfc_vpd_data, offset); 206 207 kfree(lpfc_vpd_data); 208 out_free_context2: 209 kfree(pmb->context2); 210 out_free_mbox: 211 mempool_free(pmb, phba->mbox_mem_pool); 212 return 0; 213 } 214 215 /* Completion handler for config async event mailbox command. */ 216 static void 217 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 218 { 219 if (pmboxq->mb.mbxStatus == MBX_SUCCESS) 220 phba->temp_sensor_support = 1; 221 else 222 phba->temp_sensor_support = 0; 223 mempool_free(pmboxq, phba->mbox_mem_pool); 224 return; 225 } 226 227 /************************************************************************/ 228 /* */ 229 /* lpfc_config_port_post */ 230 /* This routine will do LPFC initialization after the */ 231 /* CONFIG_PORT mailbox command. This will be initialized */ 232 /* as a SLI layer callback routine. */ 233 /* This routine returns 0 on success. Any other return value */ 234 /* indicates an error. */ 235 /* */ 236 /************************************************************************/ 237 int 238 lpfc_config_port_post(struct lpfc_hba *phba) 239 { 240 struct lpfc_vport *vport = phba->pport; 241 LPFC_MBOXQ_t *pmb; 242 MAILBOX_t *mb; 243 struct lpfc_dmabuf *mp; 244 struct lpfc_sli *psli = &phba->sli; 245 uint32_t status, timeout; 246 int i, j; 247 int rc; 248 249 spin_lock_irq(&phba->hbalock); 250 /* 251 * If the Config port completed correctly the HBA is not 252 * over heated any more. 253 */ 254 if (phba->over_temp_state == HBA_OVER_TEMP) 255 phba->over_temp_state = HBA_NORMAL_TEMP; 256 spin_unlock_irq(&phba->hbalock); 257 258 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 259 if (!pmb) { 260 phba->link_state = LPFC_HBA_ERROR; 261 return -ENOMEM; 262 } 263 mb = &pmb->mb; 264 265 /* Get login parameters for NID. */ 266 lpfc_read_sparam(phba, pmb, 0); 267 pmb->vport = vport; 268 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 269 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 270 "0448 Adapter failed init, mbxCmd x%x " 271 "READ_SPARM mbxStatus x%x\n", 272 mb->mbxCommand, mb->mbxStatus); 273 phba->link_state = LPFC_HBA_ERROR; 274 mp = (struct lpfc_dmabuf *) pmb->context1; 275 mempool_free( pmb, phba->mbox_mem_pool); 276 lpfc_mbuf_free(phba, mp->virt, mp->phys); 277 kfree(mp); 278 return -EIO; 279 } 280 281 mp = (struct lpfc_dmabuf *) pmb->context1; 282 283 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); 284 lpfc_mbuf_free(phba, mp->virt, mp->phys); 285 kfree(mp); 286 pmb->context1 = NULL; 287 288 if (phba->cfg_soft_wwnn) 289 u64_to_wwn(phba->cfg_soft_wwnn, 290 vport->fc_sparam.nodeName.u.wwn); 291 if (phba->cfg_soft_wwpn) 292 u64_to_wwn(phba->cfg_soft_wwpn, 293 vport->fc_sparam.portName.u.wwn); 294 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 295 sizeof (struct lpfc_name)); 296 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 297 sizeof (struct lpfc_name)); 298 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 299 /* This should be consolidated into parse_vpd ? - mr */ 300 if (phba->SerialNumber[0] == 0) { 301 uint8_t *outptr; 302 303 outptr = &vport->fc_nodename.u.s.IEEE[0]; 304 for (i = 0; i < 12; i++) { 305 status = *outptr++; 306 j = ((status & 0xf0) >> 4); 307 if (j <= 9) 308 phba->SerialNumber[i] = 309 (char)((uint8_t) 0x30 + (uint8_t) j); 310 else 311 phba->SerialNumber[i] = 312 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 313 i++; 314 j = (status & 0xf); 315 if (j <= 9) 316 phba->SerialNumber[i] = 317 (char)((uint8_t) 0x30 + (uint8_t) j); 318 else 319 phba->SerialNumber[i] = 320 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 321 } 322 } 323 324 lpfc_read_config(phba, pmb); 325 pmb->vport = vport; 326 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 327 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 328 "0453 Adapter failed to init, mbxCmd x%x " 329 "READ_CONFIG, mbxStatus x%x\n", 330 mb->mbxCommand, mb->mbxStatus); 331 phba->link_state = LPFC_HBA_ERROR; 332 mempool_free( pmb, phba->mbox_mem_pool); 333 return -EIO; 334 } 335 336 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 337 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1)) 338 phba->cfg_hba_queue_depth = 339 mb->un.varRdConfig.max_xri + 1; 340 341 phba->lmt = mb->un.varRdConfig.lmt; 342 343 /* Get the default values for Model Name and Description */ 344 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 345 346 if ((phba->cfg_link_speed > LINK_SPEED_10G) 347 || ((phba->cfg_link_speed == LINK_SPEED_1G) 348 && !(phba->lmt & LMT_1Gb)) 349 || ((phba->cfg_link_speed == LINK_SPEED_2G) 350 && !(phba->lmt & LMT_2Gb)) 351 || ((phba->cfg_link_speed == LINK_SPEED_4G) 352 && !(phba->lmt & LMT_4Gb)) 353 || ((phba->cfg_link_speed == LINK_SPEED_8G) 354 && !(phba->lmt & LMT_8Gb)) 355 || ((phba->cfg_link_speed == LINK_SPEED_10G) 356 && !(phba->lmt & LMT_10Gb))) { 357 /* Reset link speed to auto */ 358 lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT, 359 "1302 Invalid speed for this board: " 360 "Reset link speed to auto: x%x\n", 361 phba->cfg_link_speed); 362 phba->cfg_link_speed = LINK_SPEED_AUTO; 363 } 364 365 phba->link_state = LPFC_LINK_DOWN; 366 367 /* Only process IOCBs on ELS ring till hba_state is READY */ 368 if (psli->ring[psli->extra_ring].cmdringaddr) 369 psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT; 370 if (psli->ring[psli->fcp_ring].cmdringaddr) 371 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT; 372 if (psli->ring[psli->next_ring].cmdringaddr) 373 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT; 374 375 /* Post receive buffers for desired rings */ 376 if (phba->sli_rev != 3) 377 lpfc_post_rcv_buf(phba); 378 379 /* Enable appropriate host interrupts */ 380 spin_lock_irq(&phba->hbalock); 381 status = readl(phba->HCregaddr); 382 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 383 if (psli->num_rings > 0) 384 status |= HC_R0INT_ENA; 385 if (psli->num_rings > 1) 386 status |= HC_R1INT_ENA; 387 if (psli->num_rings > 2) 388 status |= HC_R2INT_ENA; 389 if (psli->num_rings > 3) 390 status |= HC_R3INT_ENA; 391 392 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 393 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 394 status &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 395 396 writel(status, phba->HCregaddr); 397 readl(phba->HCregaddr); /* flush */ 398 spin_unlock_irq(&phba->hbalock); 399 400 /* 401 * Setup the ring 0 (els) timeout handler 402 */ 403 timeout = phba->fc_ratov << 1; 404 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); 405 mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 406 phba->hb_outstanding = 0; 407 phba->last_completion_time = jiffies; 408 409 lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed); 410 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 411 pmb->vport = vport; 412 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 413 lpfc_set_loopback_flag(phba); 414 if (rc != MBX_SUCCESS) { 415 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 416 "0454 Adapter failed to init, mbxCmd x%x " 417 "INIT_LINK, mbxStatus x%x\n", 418 mb->mbxCommand, mb->mbxStatus); 419 420 /* Clear all interrupt enable conditions */ 421 writel(0, phba->HCregaddr); 422 readl(phba->HCregaddr); /* flush */ 423 /* Clear all pending interrupts */ 424 writel(0xffffffff, phba->HAregaddr); 425 readl(phba->HAregaddr); /* flush */ 426 427 phba->link_state = LPFC_HBA_ERROR; 428 if (rc != MBX_BUSY) 429 mempool_free(pmb, phba->mbox_mem_pool); 430 return -EIO; 431 } 432 /* MBOX buffer will be freed in mbox compl */ 433 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 434 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 435 pmb->mbox_cmpl = lpfc_config_async_cmpl; 436 pmb->vport = phba->pport; 437 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 438 439 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 440 lpfc_printf_log(phba, 441 KERN_ERR, 442 LOG_INIT, 443 "0456 Adapter failed to issue " 444 "ASYNCEVT_ENABLE mbox status x%x \n.", 445 rc); 446 mempool_free(pmb, phba->mbox_mem_pool); 447 } 448 return (0); 449 } 450 451 /************************************************************************/ 452 /* */ 453 /* lpfc_hba_down_prep */ 454 /* This routine will do LPFC uninitialization before the */ 455 /* HBA is reset when bringing down the SLI Layer. This will be */ 456 /* initialized as a SLI layer callback routine. */ 457 /* This routine returns 0 on success. Any other return value */ 458 /* indicates an error. */ 459 /* */ 460 /************************************************************************/ 461 int 462 lpfc_hba_down_prep(struct lpfc_hba *phba) 463 { 464 struct lpfc_vport **vports; 465 int i; 466 /* Disable interrupts */ 467 writel(0, phba->HCregaddr); 468 readl(phba->HCregaddr); /* flush */ 469 470 if (phba->pport->load_flag & FC_UNLOADING) 471 lpfc_cleanup_discovery_resources(phba->pport); 472 else { 473 vports = lpfc_create_vport_work_array(phba); 474 if (vports != NULL) 475 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) 476 lpfc_cleanup_discovery_resources(vports[i]); 477 lpfc_destroy_vport_work_array(phba, vports); 478 } 479 return 0; 480 } 481 482 /************************************************************************/ 483 /* */ 484 /* lpfc_hba_down_post */ 485 /* This routine will do uninitialization after the HBA is reset */ 486 /* when bringing down the SLI Layer. */ 487 /* This routine returns 0 on success. Any other return value */ 488 /* indicates an error. */ 489 /* */ 490 /************************************************************************/ 491 int 492 lpfc_hba_down_post(struct lpfc_hba *phba) 493 { 494 struct lpfc_sli *psli = &phba->sli; 495 struct lpfc_sli_ring *pring; 496 struct lpfc_dmabuf *mp, *next_mp; 497 struct lpfc_iocbq *iocb; 498 IOCB_t *cmd = NULL; 499 LIST_HEAD(completions); 500 int i; 501 502 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 503 lpfc_sli_hbqbuf_free_all(phba); 504 else { 505 /* Cleanup preposted buffers on the ELS ring */ 506 pring = &psli->ring[LPFC_ELS_RING]; 507 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 508 list_del(&mp->list); 509 pring->postbufq_cnt--; 510 lpfc_mbuf_free(phba, mp->virt, mp->phys); 511 kfree(mp); 512 } 513 } 514 515 spin_lock_irq(&phba->hbalock); 516 for (i = 0; i < psli->num_rings; i++) { 517 pring = &psli->ring[i]; 518 519 /* At this point in time the HBA is either reset or DOA. Either 520 * way, nothing should be on txcmplq as it will NEVER complete. 521 */ 522 list_splice_init(&pring->txcmplq, &completions); 523 pring->txcmplq_cnt = 0; 524 spin_unlock_irq(&phba->hbalock); 525 526 while (!list_empty(&completions)) { 527 iocb = list_get_first(&completions, struct lpfc_iocbq, 528 list); 529 cmd = &iocb->iocb; 530 list_del_init(&iocb->list); 531 532 if (!iocb->iocb_cmpl) 533 lpfc_sli_release_iocbq(phba, iocb); 534 else { 535 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 536 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 537 (iocb->iocb_cmpl) (phba, iocb, iocb); 538 } 539 } 540 541 lpfc_sli_abort_iocb_ring(phba, pring); 542 spin_lock_irq(&phba->hbalock); 543 } 544 spin_unlock_irq(&phba->hbalock); 545 546 return 0; 547 } 548 549 /* HBA heart beat timeout handler */ 550 static void 551 lpfc_hb_timeout(unsigned long ptr) 552 { 553 struct lpfc_hba *phba; 554 unsigned long iflag; 555 556 phba = (struct lpfc_hba *)ptr; 557 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 558 if (!(phba->pport->work_port_events & WORKER_HB_TMO)) 559 phba->pport->work_port_events |= WORKER_HB_TMO; 560 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 561 562 if (phba->work_wait) 563 wake_up(phba->work_wait); 564 return; 565 } 566 567 static void 568 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 569 { 570 unsigned long drvr_flag; 571 572 spin_lock_irqsave(&phba->hbalock, drvr_flag); 573 phba->hb_outstanding = 0; 574 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 575 576 mempool_free(pmboxq, phba->mbox_mem_pool); 577 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && 578 !(phba->link_state == LPFC_HBA_ERROR) && 579 !(phba->pport->load_flag & FC_UNLOADING)) 580 mod_timer(&phba->hb_tmofunc, 581 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 582 return; 583 } 584 585 void 586 lpfc_hb_timeout_handler(struct lpfc_hba *phba) 587 { 588 LPFC_MBOXQ_t *pmboxq; 589 struct lpfc_dmabuf *buf_ptr; 590 int retval; 591 struct lpfc_sli *psli = &phba->sli; 592 LIST_HEAD(completions); 593 594 if ((phba->link_state == LPFC_HBA_ERROR) || 595 (phba->pport->load_flag & FC_UNLOADING) || 596 (phba->pport->fc_flag & FC_OFFLINE_MODE)) 597 return; 598 599 spin_lock_irq(&phba->pport->work_port_lock); 600 /* If the timer is already canceled do nothing */ 601 if (!(phba->pport->work_port_events & WORKER_HB_TMO)) { 602 spin_unlock_irq(&phba->pport->work_port_lock); 603 return; 604 } 605 606 if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ, 607 jiffies)) { 608 spin_unlock_irq(&phba->pport->work_port_lock); 609 if (!phba->hb_outstanding) 610 mod_timer(&phba->hb_tmofunc, 611 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 612 else 613 mod_timer(&phba->hb_tmofunc, 614 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); 615 return; 616 } 617 spin_unlock_irq(&phba->pport->work_port_lock); 618 619 if (phba->elsbuf_cnt && 620 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { 621 spin_lock_irq(&phba->hbalock); 622 list_splice_init(&phba->elsbuf, &completions); 623 phba->elsbuf_cnt = 0; 624 phba->elsbuf_prev_cnt = 0; 625 spin_unlock_irq(&phba->hbalock); 626 627 while (!list_empty(&completions)) { 628 list_remove_head(&completions, buf_ptr, 629 struct lpfc_dmabuf, list); 630 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 631 kfree(buf_ptr); 632 } 633 } 634 phba->elsbuf_prev_cnt = phba->elsbuf_cnt; 635 636 /* If there is no heart beat outstanding, issue a heartbeat command */ 637 if (phba->cfg_enable_hba_heartbeat) { 638 if (!phba->hb_outstanding) { 639 pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL); 640 if (!pmboxq) { 641 mod_timer(&phba->hb_tmofunc, 642 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 643 return; 644 } 645 646 lpfc_heart_beat(phba, pmboxq); 647 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; 648 pmboxq->vport = phba->pport; 649 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 650 651 if (retval != MBX_BUSY && retval != MBX_SUCCESS) { 652 mempool_free(pmboxq, phba->mbox_mem_pool); 653 mod_timer(&phba->hb_tmofunc, 654 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 655 return; 656 } 657 mod_timer(&phba->hb_tmofunc, 658 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); 659 phba->hb_outstanding = 1; 660 return; 661 } else { 662 /* 663 * If heart beat timeout called with hb_outstanding set 664 * we need to take the HBA offline. 665 */ 666 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 667 "0459 Adapter heartbeat failure, " 668 "taking this port offline.\n"); 669 670 spin_lock_irq(&phba->hbalock); 671 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 672 spin_unlock_irq(&phba->hbalock); 673 674 lpfc_offline_prep(phba); 675 lpfc_offline(phba); 676 lpfc_unblock_mgmt_io(phba); 677 phba->link_state = LPFC_HBA_ERROR; 678 lpfc_hba_down_post(phba); 679 } 680 } 681 } 682 683 static void 684 lpfc_offline_eratt(struct lpfc_hba *phba) 685 { 686 struct lpfc_sli *psli = &phba->sli; 687 688 spin_lock_irq(&phba->hbalock); 689 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 690 spin_unlock_irq(&phba->hbalock); 691 lpfc_offline_prep(phba); 692 693 lpfc_offline(phba); 694 lpfc_reset_barrier(phba); 695 lpfc_sli_brdreset(phba); 696 lpfc_hba_down_post(phba); 697 lpfc_sli_brdready(phba, HS_MBRDY); 698 lpfc_unblock_mgmt_io(phba); 699 phba->link_state = LPFC_HBA_ERROR; 700 return; 701 } 702 703 /************************************************************************/ 704 /* */ 705 /* lpfc_handle_eratt */ 706 /* This routine will handle processing a Host Attention */ 707 /* Error Status event. This will be initialized */ 708 /* as a SLI layer callback routine. */ 709 /* */ 710 /************************************************************************/ 711 void 712 lpfc_handle_eratt(struct lpfc_hba *phba) 713 { 714 struct lpfc_vport *vport = phba->pport; 715 struct lpfc_sli *psli = &phba->sli; 716 struct lpfc_sli_ring *pring; 717 struct lpfc_vport **vports; 718 uint32_t event_data; 719 unsigned long temperature; 720 struct temp_event temp_event_data; 721 struct Scsi_Host *shost; 722 int i; 723 724 /* If the pci channel is offline, ignore possible errors, 725 * since we cannot communicate with the pci card anyway. */ 726 if (pci_channel_offline(phba->pcidev)) 727 return; 728 /* If resets are disabled then leave the HBA alone and return */ 729 if (!phba->cfg_enable_hba_reset) 730 return; 731 732 if (phba->work_hs & HS_FFER6 || 733 phba->work_hs & HS_FFER5) { 734 /* Re-establishing Link */ 735 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 736 "1301 Re-establishing Link " 737 "Data: x%x x%x x%x\n", 738 phba->work_hs, 739 phba->work_status[0], phba->work_status[1]); 740 vports = lpfc_create_vport_work_array(phba); 741 if (vports != NULL) 742 for(i = 0; 743 i <= phba->max_vpi && vports[i] != NULL; 744 i++){ 745 shost = lpfc_shost_from_vport(vports[i]); 746 spin_lock_irq(shost->host_lock); 747 vports[i]->fc_flag |= FC_ESTABLISH_LINK; 748 spin_unlock_irq(shost->host_lock); 749 } 750 lpfc_destroy_vport_work_array(phba, vports); 751 spin_lock_irq(&phba->hbalock); 752 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 753 spin_unlock_irq(&phba->hbalock); 754 755 /* 756 * Firmware stops when it triggled erratt with HS_FFER6. 757 * That could cause the I/Os dropped by the firmware. 758 * Error iocb (I/O) on txcmplq and let the SCSI layer 759 * retry it after re-establishing link. 760 */ 761 pring = &psli->ring[psli->fcp_ring]; 762 lpfc_sli_abort_iocb_ring(phba, pring); 763 764 765 /* 766 * There was a firmware error. Take the hba offline and then 767 * attempt to restart it. 768 */ 769 lpfc_offline_prep(phba); 770 lpfc_offline(phba); 771 lpfc_sli_brdrestart(phba); 772 if (lpfc_online(phba) == 0) { /* Initialize the HBA */ 773 mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60); 774 lpfc_unblock_mgmt_io(phba); 775 return; 776 } 777 lpfc_unblock_mgmt_io(phba); 778 } else if (phba->work_hs & HS_CRIT_TEMP) { 779 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET); 780 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 781 temp_event_data.event_code = LPFC_CRIT_TEMP; 782 temp_event_data.data = (uint32_t)temperature; 783 784 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 785 "0459 Adapter maximum temperature exceeded " 786 "(%ld), taking this port offline " 787 "Data: x%x x%x x%x\n", 788 temperature, phba->work_hs, 789 phba->work_status[0], phba->work_status[1]); 790 791 shost = lpfc_shost_from_vport(phba->pport); 792 fc_host_post_vendor_event(shost, fc_get_event_number(), 793 sizeof(temp_event_data), 794 (char *) &temp_event_data, 795 SCSI_NL_VID_TYPE_PCI 796 | PCI_VENDOR_ID_EMULEX); 797 798 spin_lock_irq(&phba->hbalock); 799 phba->over_temp_state = HBA_OVER_TEMP; 800 spin_unlock_irq(&phba->hbalock); 801 lpfc_offline_eratt(phba); 802 803 } else { 804 /* The if clause above forces this code path when the status 805 * failure is a value other than FFER6. Do not call the offline 806 * twice. This is the adapter hardware error path. 807 */ 808 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 809 "0457 Adapter Hardware Error " 810 "Data: x%x x%x x%x\n", 811 phba->work_hs, 812 phba->work_status[0], phba->work_status[1]); 813 814 event_data = FC_REG_DUMP_EVENT; 815 shost = lpfc_shost_from_vport(vport); 816 fc_host_post_vendor_event(shost, fc_get_event_number(), 817 sizeof(event_data), (char *) &event_data, 818 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 819 820 lpfc_offline_eratt(phba); 821 } 822 } 823 824 /************************************************************************/ 825 /* */ 826 /* lpfc_handle_latt */ 827 /* This routine will handle processing a Host Attention */ 828 /* Link Status event. This will be initialized */ 829 /* as a SLI layer callback routine. */ 830 /* */ 831 /************************************************************************/ 832 void 833 lpfc_handle_latt(struct lpfc_hba *phba) 834 { 835 struct lpfc_vport *vport = phba->pport; 836 struct lpfc_sli *psli = &phba->sli; 837 LPFC_MBOXQ_t *pmb; 838 volatile uint32_t control; 839 struct lpfc_dmabuf *mp; 840 int rc = 0; 841 842 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 843 if (!pmb) { 844 rc = 1; 845 goto lpfc_handle_latt_err_exit; 846 } 847 848 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 849 if (!mp) { 850 rc = 2; 851 goto lpfc_handle_latt_free_pmb; 852 } 853 854 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 855 if (!mp->virt) { 856 rc = 3; 857 goto lpfc_handle_latt_free_mp; 858 } 859 860 /* Cleanup any outstanding ELS commands */ 861 lpfc_els_flush_all_cmd(phba); 862 863 psli->slistat.link_event++; 864 lpfc_read_la(phba, pmb, mp); 865 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la; 866 pmb->vport = vport; 867 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 868 if (rc == MBX_NOT_FINISHED) { 869 rc = 4; 870 goto lpfc_handle_latt_free_mbuf; 871 } 872 873 /* Clear Link Attention in HA REG */ 874 spin_lock_irq(&phba->hbalock); 875 writel(HA_LATT, phba->HAregaddr); 876 readl(phba->HAregaddr); /* flush */ 877 spin_unlock_irq(&phba->hbalock); 878 879 return; 880 881 lpfc_handle_latt_free_mbuf: 882 lpfc_mbuf_free(phba, mp->virt, mp->phys); 883 lpfc_handle_latt_free_mp: 884 kfree(mp); 885 lpfc_handle_latt_free_pmb: 886 mempool_free(pmb, phba->mbox_mem_pool); 887 lpfc_handle_latt_err_exit: 888 /* Enable Link attention interrupts */ 889 spin_lock_irq(&phba->hbalock); 890 psli->sli_flag |= LPFC_PROCESS_LA; 891 control = readl(phba->HCregaddr); 892 control |= HC_LAINT_ENA; 893 writel(control, phba->HCregaddr); 894 readl(phba->HCregaddr); /* flush */ 895 896 /* Clear Link Attention in HA REG */ 897 writel(HA_LATT, phba->HAregaddr); 898 readl(phba->HAregaddr); /* flush */ 899 spin_unlock_irq(&phba->hbalock); 900 lpfc_linkdown(phba); 901 phba->link_state = LPFC_HBA_ERROR; 902 903 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 904 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 905 906 return; 907 } 908 909 /************************************************************************/ 910 /* */ 911 /* lpfc_parse_vpd */ 912 /* This routine will parse the VPD data */ 913 /* */ 914 /************************************************************************/ 915 static int 916 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 917 { 918 uint8_t lenlo, lenhi; 919 int Length; 920 int i, j; 921 int finished = 0; 922 int index = 0; 923 924 if (!vpd) 925 return 0; 926 927 /* Vital Product */ 928 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 929 "0455 Vital Product Data: x%x x%x x%x x%x\n", 930 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 931 (uint32_t) vpd[3]); 932 while (!finished && (index < (len - 4))) { 933 switch (vpd[index]) { 934 case 0x82: 935 case 0x91: 936 index += 1; 937 lenlo = vpd[index]; 938 index += 1; 939 lenhi = vpd[index]; 940 index += 1; 941 i = ((((unsigned short)lenhi) << 8) + lenlo); 942 index += i; 943 break; 944 case 0x90: 945 index += 1; 946 lenlo = vpd[index]; 947 index += 1; 948 lenhi = vpd[index]; 949 index += 1; 950 Length = ((((unsigned short)lenhi) << 8) + lenlo); 951 if (Length > len - index) 952 Length = len - index; 953 while (Length > 0) { 954 /* Look for Serial Number */ 955 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 956 index += 2; 957 i = vpd[index]; 958 index += 1; 959 j = 0; 960 Length -= (3+i); 961 while(i--) { 962 phba->SerialNumber[j++] = vpd[index++]; 963 if (j == 31) 964 break; 965 } 966 phba->SerialNumber[j] = 0; 967 continue; 968 } 969 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 970 phba->vpd_flag |= VPD_MODEL_DESC; 971 index += 2; 972 i = vpd[index]; 973 index += 1; 974 j = 0; 975 Length -= (3+i); 976 while(i--) { 977 phba->ModelDesc[j++] = vpd[index++]; 978 if (j == 255) 979 break; 980 } 981 phba->ModelDesc[j] = 0; 982 continue; 983 } 984 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 985 phba->vpd_flag |= VPD_MODEL_NAME; 986 index += 2; 987 i = vpd[index]; 988 index += 1; 989 j = 0; 990 Length -= (3+i); 991 while(i--) { 992 phba->ModelName[j++] = vpd[index++]; 993 if (j == 79) 994 break; 995 } 996 phba->ModelName[j] = 0; 997 continue; 998 } 999 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 1000 phba->vpd_flag |= VPD_PROGRAM_TYPE; 1001 index += 2; 1002 i = vpd[index]; 1003 index += 1; 1004 j = 0; 1005 Length -= (3+i); 1006 while(i--) { 1007 phba->ProgramType[j++] = vpd[index++]; 1008 if (j == 255) 1009 break; 1010 } 1011 phba->ProgramType[j] = 0; 1012 continue; 1013 } 1014 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 1015 phba->vpd_flag |= VPD_PORT; 1016 index += 2; 1017 i = vpd[index]; 1018 index += 1; 1019 j = 0; 1020 Length -= (3+i); 1021 while(i--) { 1022 phba->Port[j++] = vpd[index++]; 1023 if (j == 19) 1024 break; 1025 } 1026 phba->Port[j] = 0; 1027 continue; 1028 } 1029 else { 1030 index += 2; 1031 i = vpd[index]; 1032 index += 1; 1033 index += i; 1034 Length -= (3 + i); 1035 } 1036 } 1037 finished = 0; 1038 break; 1039 case 0x78: 1040 finished = 1; 1041 break; 1042 default: 1043 index ++; 1044 break; 1045 } 1046 } 1047 1048 return(1); 1049 } 1050 1051 static void 1052 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 1053 { 1054 lpfc_vpd_t *vp; 1055 uint16_t dev_id = phba->pcidev->device; 1056 int max_speed; 1057 struct { 1058 char * name; 1059 int max_speed; 1060 char * bus; 1061 } m = {"<Unknown>", 0, ""}; 1062 1063 if (mdp && mdp[0] != '\0' 1064 && descp && descp[0] != '\0') 1065 return; 1066 1067 if (phba->lmt & LMT_10Gb) 1068 max_speed = 10; 1069 else if (phba->lmt & LMT_8Gb) 1070 max_speed = 8; 1071 else if (phba->lmt & LMT_4Gb) 1072 max_speed = 4; 1073 else if (phba->lmt & LMT_2Gb) 1074 max_speed = 2; 1075 else 1076 max_speed = 1; 1077 1078 vp = &phba->vpd; 1079 1080 switch (dev_id) { 1081 case PCI_DEVICE_ID_FIREFLY: 1082 m = (typeof(m)){"LP6000", max_speed, "PCI"}; 1083 break; 1084 case PCI_DEVICE_ID_SUPERFLY: 1085 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 1086 m = (typeof(m)){"LP7000", max_speed, "PCI"}; 1087 else 1088 m = (typeof(m)){"LP7000E", max_speed, "PCI"}; 1089 break; 1090 case PCI_DEVICE_ID_DRAGONFLY: 1091 m = (typeof(m)){"LP8000", max_speed, "PCI"}; 1092 break; 1093 case PCI_DEVICE_ID_CENTAUR: 1094 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 1095 m = (typeof(m)){"LP9002", max_speed, "PCI"}; 1096 else 1097 m = (typeof(m)){"LP9000", max_speed, "PCI"}; 1098 break; 1099 case PCI_DEVICE_ID_RFLY: 1100 m = (typeof(m)){"LP952", max_speed, "PCI"}; 1101 break; 1102 case PCI_DEVICE_ID_PEGASUS: 1103 m = (typeof(m)){"LP9802", max_speed, "PCI-X"}; 1104 break; 1105 case PCI_DEVICE_ID_THOR: 1106 m = (typeof(m)){"LP10000", max_speed, "PCI-X"}; 1107 break; 1108 case PCI_DEVICE_ID_VIPER: 1109 m = (typeof(m)){"LPX1000", max_speed, "PCI-X"}; 1110 break; 1111 case PCI_DEVICE_ID_PFLY: 1112 m = (typeof(m)){"LP982", max_speed, "PCI-X"}; 1113 break; 1114 case PCI_DEVICE_ID_TFLY: 1115 m = (typeof(m)){"LP1050", max_speed, "PCI-X"}; 1116 break; 1117 case PCI_DEVICE_ID_HELIOS: 1118 m = (typeof(m)){"LP11000", max_speed, "PCI-X2"}; 1119 break; 1120 case PCI_DEVICE_ID_HELIOS_SCSP: 1121 m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"}; 1122 break; 1123 case PCI_DEVICE_ID_HELIOS_DCSP: 1124 m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"}; 1125 break; 1126 case PCI_DEVICE_ID_NEPTUNE: 1127 m = (typeof(m)){"LPe1000", max_speed, "PCIe"}; 1128 break; 1129 case PCI_DEVICE_ID_NEPTUNE_SCSP: 1130 m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"}; 1131 break; 1132 case PCI_DEVICE_ID_NEPTUNE_DCSP: 1133 m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"}; 1134 break; 1135 case PCI_DEVICE_ID_BMID: 1136 m = (typeof(m)){"LP1150", max_speed, "PCI-X2"}; 1137 break; 1138 case PCI_DEVICE_ID_BSMB: 1139 m = (typeof(m)){"LP111", max_speed, "PCI-X2"}; 1140 break; 1141 case PCI_DEVICE_ID_ZEPHYR: 1142 m = (typeof(m)){"LPe11000", max_speed, "PCIe"}; 1143 break; 1144 case PCI_DEVICE_ID_ZEPHYR_SCSP: 1145 m = (typeof(m)){"LPe11000", max_speed, "PCIe"}; 1146 break; 1147 case PCI_DEVICE_ID_ZEPHYR_DCSP: 1148 m = (typeof(m)){"LPe11002-SP", max_speed, "PCIe"}; 1149 break; 1150 case PCI_DEVICE_ID_ZMID: 1151 m = (typeof(m)){"LPe1150", max_speed, "PCIe"}; 1152 break; 1153 case PCI_DEVICE_ID_ZSMB: 1154 m = (typeof(m)){"LPe111", max_speed, "PCIe"}; 1155 break; 1156 case PCI_DEVICE_ID_LP101: 1157 m = (typeof(m)){"LP101", max_speed, "PCI-X"}; 1158 break; 1159 case PCI_DEVICE_ID_LP10000S: 1160 m = (typeof(m)){"LP10000-S", max_speed, "PCI"}; 1161 break; 1162 case PCI_DEVICE_ID_LP11000S: 1163 m = (typeof(m)){"LP11000-S", max_speed, 1164 "PCI-X2"}; 1165 break; 1166 case PCI_DEVICE_ID_LPE11000S: 1167 m = (typeof(m)){"LPe11000-S", max_speed, 1168 "PCIe"}; 1169 break; 1170 case PCI_DEVICE_ID_SAT: 1171 m = (typeof(m)){"LPe12000", max_speed, "PCIe"}; 1172 break; 1173 case PCI_DEVICE_ID_SAT_MID: 1174 m = (typeof(m)){"LPe1250", max_speed, "PCIe"}; 1175 break; 1176 case PCI_DEVICE_ID_SAT_SMB: 1177 m = (typeof(m)){"LPe121", max_speed, "PCIe"}; 1178 break; 1179 case PCI_DEVICE_ID_SAT_DCSP: 1180 m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"}; 1181 break; 1182 case PCI_DEVICE_ID_SAT_SCSP: 1183 m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"}; 1184 break; 1185 case PCI_DEVICE_ID_SAT_S: 1186 m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"}; 1187 break; 1188 default: 1189 m = (typeof(m)){ NULL }; 1190 break; 1191 } 1192 1193 if (mdp && mdp[0] == '\0') 1194 snprintf(mdp, 79,"%s", m.name); 1195 if (descp && descp[0] == '\0') 1196 snprintf(descp, 255, 1197 "Emulex %s %dGb %s Fibre Channel Adapter", 1198 m.name, m.max_speed, m.bus); 1199 } 1200 1201 /**************************************************/ 1202 /* lpfc_post_buffer */ 1203 /* */ 1204 /* This routine will post count buffers to the */ 1205 /* ring with the QUE_RING_BUF_CN command. This */ 1206 /* allows 3 buffers / command to be posted. */ 1207 /* Returns the number of buffers NOT posted. */ 1208 /**************************************************/ 1209 int 1210 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt, 1211 int type) 1212 { 1213 IOCB_t *icmd; 1214 struct lpfc_iocbq *iocb; 1215 struct lpfc_dmabuf *mp1, *mp2; 1216 1217 cnt += pring->missbufcnt; 1218 1219 /* While there are buffers to post */ 1220 while (cnt > 0) { 1221 /* Allocate buffer for command iocb */ 1222 iocb = lpfc_sli_get_iocbq(phba); 1223 if (iocb == NULL) { 1224 pring->missbufcnt = cnt; 1225 return cnt; 1226 } 1227 icmd = &iocb->iocb; 1228 1229 /* 2 buffers can be posted per command */ 1230 /* Allocate buffer to post */ 1231 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 1232 if (mp1) 1233 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 1234 if (!mp1 || !mp1->virt) { 1235 kfree(mp1); 1236 lpfc_sli_release_iocbq(phba, iocb); 1237 pring->missbufcnt = cnt; 1238 return cnt; 1239 } 1240 1241 INIT_LIST_HEAD(&mp1->list); 1242 /* Allocate buffer to post */ 1243 if (cnt > 1) { 1244 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 1245 if (mp2) 1246 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 1247 &mp2->phys); 1248 if (!mp2 || !mp2->virt) { 1249 kfree(mp2); 1250 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1251 kfree(mp1); 1252 lpfc_sli_release_iocbq(phba, iocb); 1253 pring->missbufcnt = cnt; 1254 return cnt; 1255 } 1256 1257 INIT_LIST_HEAD(&mp2->list); 1258 } else { 1259 mp2 = NULL; 1260 } 1261 1262 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 1263 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 1264 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 1265 icmd->ulpBdeCount = 1; 1266 cnt--; 1267 if (mp2) { 1268 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 1269 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 1270 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 1271 cnt--; 1272 icmd->ulpBdeCount = 2; 1273 } 1274 1275 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 1276 icmd->ulpLe = 1; 1277 1278 if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) { 1279 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1280 kfree(mp1); 1281 cnt++; 1282 if (mp2) { 1283 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 1284 kfree(mp2); 1285 cnt++; 1286 } 1287 lpfc_sli_release_iocbq(phba, iocb); 1288 pring->missbufcnt = cnt; 1289 return cnt; 1290 } 1291 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 1292 if (mp2) 1293 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 1294 } 1295 pring->missbufcnt = 0; 1296 return 0; 1297 } 1298 1299 /************************************************************************/ 1300 /* */ 1301 /* lpfc_post_rcv_buf */ 1302 /* This routine post initial rcv buffers to the configured rings */ 1303 /* */ 1304 /************************************************************************/ 1305 static int 1306 lpfc_post_rcv_buf(struct lpfc_hba *phba) 1307 { 1308 struct lpfc_sli *psli = &phba->sli; 1309 1310 /* Ring 0, ELS / CT buffers */ 1311 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0, 1); 1312 /* Ring 2 - FCP no buffers needed */ 1313 1314 return 0; 1315 } 1316 1317 #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 1318 1319 /************************************************************************/ 1320 /* */ 1321 /* lpfc_sha_init */ 1322 /* */ 1323 /************************************************************************/ 1324 static void 1325 lpfc_sha_init(uint32_t * HashResultPointer) 1326 { 1327 HashResultPointer[0] = 0x67452301; 1328 HashResultPointer[1] = 0xEFCDAB89; 1329 HashResultPointer[2] = 0x98BADCFE; 1330 HashResultPointer[3] = 0x10325476; 1331 HashResultPointer[4] = 0xC3D2E1F0; 1332 } 1333 1334 /************************************************************************/ 1335 /* */ 1336 /* lpfc_sha_iterate */ 1337 /* */ 1338 /************************************************************************/ 1339 static void 1340 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) 1341 { 1342 int t; 1343 uint32_t TEMP; 1344 uint32_t A, B, C, D, E; 1345 t = 16; 1346 do { 1347 HashWorkingPointer[t] = 1348 S(1, 1349 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 1350 8] ^ 1351 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]); 1352 } while (++t <= 79); 1353 t = 0; 1354 A = HashResultPointer[0]; 1355 B = HashResultPointer[1]; 1356 C = HashResultPointer[2]; 1357 D = HashResultPointer[3]; 1358 E = HashResultPointer[4]; 1359 1360 do { 1361 if (t < 20) { 1362 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999; 1363 } else if (t < 40) { 1364 TEMP = (B ^ C ^ D) + 0x6ED9EBA1; 1365 } else if (t < 60) { 1366 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC; 1367 } else { 1368 TEMP = (B ^ C ^ D) + 0xCA62C1D6; 1369 } 1370 TEMP += S(5, A) + E + HashWorkingPointer[t]; 1371 E = D; 1372 D = C; 1373 C = S(30, B); 1374 B = A; 1375 A = TEMP; 1376 } while (++t <= 79); 1377 1378 HashResultPointer[0] += A; 1379 HashResultPointer[1] += B; 1380 HashResultPointer[2] += C; 1381 HashResultPointer[3] += D; 1382 HashResultPointer[4] += E; 1383 1384 } 1385 1386 /************************************************************************/ 1387 /* */ 1388 /* lpfc_challenge_key */ 1389 /* */ 1390 /************************************************************************/ 1391 static void 1392 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) 1393 { 1394 *HashWorking = (*RandomChallenge ^ *HashWorking); 1395 } 1396 1397 /************************************************************************/ 1398 /* */ 1399 /* lpfc_hba_init */ 1400 /* */ 1401 /************************************************************************/ 1402 void 1403 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 1404 { 1405 int t; 1406 uint32_t *HashWorking; 1407 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 1408 1409 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 1410 if (!HashWorking) 1411 return; 1412 1413 HashWorking[0] = HashWorking[78] = *pwwnn++; 1414 HashWorking[1] = HashWorking[79] = *pwwnn; 1415 1416 for (t = 0; t < 7; t++) 1417 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 1418 1419 lpfc_sha_init(hbainit); 1420 lpfc_sha_iterate(hbainit, HashWorking); 1421 kfree(HashWorking); 1422 } 1423 1424 void 1425 lpfc_cleanup(struct lpfc_vport *vport) 1426 { 1427 struct lpfc_hba *phba = vport->phba; 1428 struct lpfc_nodelist *ndlp, *next_ndlp; 1429 int i = 0; 1430 1431 if (phba->link_state > LPFC_LINK_DOWN) 1432 lpfc_port_link_failure(vport); 1433 1434 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 1435 if (!NLP_CHK_NODE_ACT(ndlp)) { 1436 ndlp = lpfc_enable_node(vport, ndlp, 1437 NLP_STE_UNUSED_NODE); 1438 if (!ndlp) 1439 continue; 1440 spin_lock_irq(&phba->ndlp_lock); 1441 NLP_SET_FREE_REQ(ndlp); 1442 spin_unlock_irq(&phba->ndlp_lock); 1443 /* Trigger the release of the ndlp memory */ 1444 lpfc_nlp_put(ndlp); 1445 continue; 1446 } 1447 spin_lock_irq(&phba->ndlp_lock); 1448 if (NLP_CHK_FREE_REQ(ndlp)) { 1449 /* The ndlp should not be in memory free mode already */ 1450 spin_unlock_irq(&phba->ndlp_lock); 1451 continue; 1452 } else 1453 /* Indicate request for freeing ndlp memory */ 1454 NLP_SET_FREE_REQ(ndlp); 1455 spin_unlock_irq(&phba->ndlp_lock); 1456 1457 if (ndlp->nlp_type & NLP_FABRIC) 1458 lpfc_disc_state_machine(vport, ndlp, NULL, 1459 NLP_EVT_DEVICE_RECOVERY); 1460 1461 lpfc_disc_state_machine(vport, ndlp, NULL, 1462 NLP_EVT_DEVICE_RM); 1463 } 1464 1465 /* At this point, ALL ndlp's should be gone 1466 * because of the previous NLP_EVT_DEVICE_RM. 1467 * Lets wait for this to happen, if needed. 1468 */ 1469 while (!list_empty(&vport->fc_nodes)) { 1470 1471 if (i++ > 3000) { 1472 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 1473 "0233 Nodelist not empty\n"); 1474 list_for_each_entry_safe(ndlp, next_ndlp, 1475 &vport->fc_nodes, nlp_listp) { 1476 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 1477 LOG_NODE, 1478 "0282: did:x%x ndlp:x%p " 1479 "usgmap:x%x refcnt:%d\n", 1480 ndlp->nlp_DID, (void *)ndlp, 1481 ndlp->nlp_usg_map, 1482 atomic_read( 1483 &ndlp->kref.refcount)); 1484 } 1485 break; 1486 } 1487 1488 /* Wait for any activity on ndlps to settle */ 1489 msleep(10); 1490 } 1491 return; 1492 } 1493 1494 static void 1495 lpfc_establish_link_tmo(unsigned long ptr) 1496 { 1497 struct lpfc_hba *phba = (struct lpfc_hba *) ptr; 1498 struct lpfc_vport **vports; 1499 unsigned long iflag; 1500 int i; 1501 1502 /* Re-establishing Link, timer expired */ 1503 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 1504 "1300 Re-establishing Link, timer expired " 1505 "Data: x%x x%x\n", 1506 phba->pport->fc_flag, phba->pport->port_state); 1507 vports = lpfc_create_vport_work_array(phba); 1508 if (vports != NULL) 1509 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 1510 struct Scsi_Host *shost; 1511 shost = lpfc_shost_from_vport(vports[i]); 1512 spin_lock_irqsave(shost->host_lock, iflag); 1513 vports[i]->fc_flag &= ~FC_ESTABLISH_LINK; 1514 spin_unlock_irqrestore(shost->host_lock, iflag); 1515 } 1516 lpfc_destroy_vport_work_array(phba, vports); 1517 } 1518 1519 void 1520 lpfc_stop_vport_timers(struct lpfc_vport *vport) 1521 { 1522 del_timer_sync(&vport->els_tmofunc); 1523 del_timer_sync(&vport->fc_fdmitmo); 1524 lpfc_can_disctmo(vport); 1525 return; 1526 } 1527 1528 static void 1529 lpfc_stop_phba_timers(struct lpfc_hba *phba) 1530 { 1531 del_timer_sync(&phba->fcp_poll_timer); 1532 del_timer_sync(&phba->fc_estabtmo); 1533 lpfc_stop_vport_timers(phba->pport); 1534 del_timer_sync(&phba->sli.mbox_tmo); 1535 del_timer_sync(&phba->fabric_block_timer); 1536 phba->hb_outstanding = 0; 1537 del_timer_sync(&phba->hb_tmofunc); 1538 return; 1539 } 1540 1541 static void 1542 lpfc_block_mgmt_io(struct lpfc_hba * phba) 1543 { 1544 unsigned long iflag; 1545 1546 spin_lock_irqsave(&phba->hbalock, iflag); 1547 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 1548 spin_unlock_irqrestore(&phba->hbalock, iflag); 1549 } 1550 1551 int 1552 lpfc_online(struct lpfc_hba *phba) 1553 { 1554 struct lpfc_vport *vport = phba->pport; 1555 struct lpfc_vport **vports; 1556 int i; 1557 1558 if (!phba) 1559 return 0; 1560 1561 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 1562 return 0; 1563 1564 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1565 "0458 Bring Adapter online\n"); 1566 1567 lpfc_block_mgmt_io(phba); 1568 1569 if (!lpfc_sli_queue_setup(phba)) { 1570 lpfc_unblock_mgmt_io(phba); 1571 return 1; 1572 } 1573 1574 if (lpfc_sli_hba_setup(phba)) { /* Initialize the HBA */ 1575 lpfc_unblock_mgmt_io(phba); 1576 return 1; 1577 } 1578 1579 vports = lpfc_create_vport_work_array(phba); 1580 if (vports != NULL) 1581 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 1582 struct Scsi_Host *shost; 1583 shost = lpfc_shost_from_vport(vports[i]); 1584 spin_lock_irq(shost->host_lock); 1585 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 1586 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 1587 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 1588 spin_unlock_irq(shost->host_lock); 1589 } 1590 lpfc_destroy_vport_work_array(phba, vports); 1591 1592 lpfc_unblock_mgmt_io(phba); 1593 return 0; 1594 } 1595 1596 void 1597 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 1598 { 1599 unsigned long iflag; 1600 1601 spin_lock_irqsave(&phba->hbalock, iflag); 1602 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 1603 spin_unlock_irqrestore(&phba->hbalock, iflag); 1604 } 1605 1606 void 1607 lpfc_offline_prep(struct lpfc_hba * phba) 1608 { 1609 struct lpfc_vport *vport = phba->pport; 1610 struct lpfc_nodelist *ndlp, *next_ndlp; 1611 struct lpfc_vport **vports; 1612 int i; 1613 1614 if (vport->fc_flag & FC_OFFLINE_MODE) 1615 return; 1616 1617 lpfc_block_mgmt_io(phba); 1618 1619 lpfc_linkdown(phba); 1620 1621 /* Issue an unreg_login to all nodes on all vports */ 1622 vports = lpfc_create_vport_work_array(phba); 1623 if (vports != NULL) { 1624 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 1625 struct Scsi_Host *shost; 1626 1627 if (vports[i]->load_flag & FC_UNLOADING) 1628 continue; 1629 shost = lpfc_shost_from_vport(vports[i]); 1630 list_for_each_entry_safe(ndlp, next_ndlp, 1631 &vports[i]->fc_nodes, 1632 nlp_listp) { 1633 if (!NLP_CHK_NODE_ACT(ndlp)) 1634 continue; 1635 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 1636 continue; 1637 if (ndlp->nlp_type & NLP_FABRIC) { 1638 lpfc_disc_state_machine(vports[i], ndlp, 1639 NULL, NLP_EVT_DEVICE_RECOVERY); 1640 lpfc_disc_state_machine(vports[i], ndlp, 1641 NULL, NLP_EVT_DEVICE_RM); 1642 } 1643 spin_lock_irq(shost->host_lock); 1644 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 1645 spin_unlock_irq(shost->host_lock); 1646 lpfc_unreg_rpi(vports[i], ndlp); 1647 } 1648 } 1649 } 1650 lpfc_destroy_vport_work_array(phba, vports); 1651 1652 lpfc_sli_flush_mbox_queue(phba); 1653 } 1654 1655 void 1656 lpfc_offline(struct lpfc_hba *phba) 1657 { 1658 struct Scsi_Host *shost; 1659 struct lpfc_vport **vports; 1660 int i; 1661 1662 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 1663 return; 1664 1665 /* stop all timers associated with this hba */ 1666 lpfc_stop_phba_timers(phba); 1667 vports = lpfc_create_vport_work_array(phba); 1668 if (vports != NULL) 1669 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) 1670 lpfc_stop_vport_timers(vports[i]); 1671 lpfc_destroy_vport_work_array(phba, vports); 1672 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1673 "0460 Bring Adapter offline\n"); 1674 /* Bring down the SLI Layer and cleanup. The HBA is offline 1675 now. */ 1676 lpfc_sli_hba_down(phba); 1677 spin_lock_irq(&phba->hbalock); 1678 phba->work_ha = 0; 1679 spin_unlock_irq(&phba->hbalock); 1680 vports = lpfc_create_vport_work_array(phba); 1681 if (vports != NULL) 1682 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 1683 shost = lpfc_shost_from_vport(vports[i]); 1684 spin_lock_irq(shost->host_lock); 1685 vports[i]->work_port_events = 0; 1686 vports[i]->fc_flag |= FC_OFFLINE_MODE; 1687 spin_unlock_irq(shost->host_lock); 1688 } 1689 lpfc_destroy_vport_work_array(phba, vports); 1690 } 1691 1692 /****************************************************************************** 1693 * Function name: lpfc_scsi_free 1694 * 1695 * Description: Called from lpfc_pci_remove_one free internal driver resources 1696 * 1697 ******************************************************************************/ 1698 static int 1699 lpfc_scsi_free(struct lpfc_hba *phba) 1700 { 1701 struct lpfc_scsi_buf *sb, *sb_next; 1702 struct lpfc_iocbq *io, *io_next; 1703 1704 spin_lock_irq(&phba->hbalock); 1705 /* Release all the lpfc_scsi_bufs maintained by this host. */ 1706 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 1707 list_del(&sb->list); 1708 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 1709 sb->dma_handle); 1710 kfree(sb); 1711 phba->total_scsi_bufs--; 1712 } 1713 1714 /* Release all the lpfc_iocbq entries maintained by this host. */ 1715 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 1716 list_del(&io->list); 1717 kfree(io); 1718 phba->total_iocbq_bufs--; 1719 } 1720 1721 spin_unlock_irq(&phba->hbalock); 1722 1723 return 0; 1724 } 1725 1726 struct lpfc_vport * 1727 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 1728 { 1729 struct lpfc_vport *vport; 1730 struct Scsi_Host *shost; 1731 int error = 0; 1732 1733 if (dev != &phba->pcidev->dev) 1734 shost = scsi_host_alloc(&lpfc_vport_template, 1735 sizeof(struct lpfc_vport)); 1736 else 1737 shost = scsi_host_alloc(&lpfc_template, 1738 sizeof(struct lpfc_vport)); 1739 if (!shost) 1740 goto out; 1741 1742 vport = (struct lpfc_vport *) shost->hostdata; 1743 vport->phba = phba; 1744 vport->load_flag |= FC_LOADING; 1745 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 1746 vport->fc_rscn_flush = 0; 1747 1748 lpfc_get_vport_cfgparam(vport); 1749 shost->unique_id = instance; 1750 shost->max_id = LPFC_MAX_TARGET; 1751 shost->max_lun = vport->cfg_max_luns; 1752 shost->this_id = -1; 1753 shost->max_cmd_len = 16; 1754 /* 1755 * Set initial can_queue value since 0 is no longer supported and 1756 * scsi_add_host will fail. This will be adjusted later based on the 1757 * max xri value determined in hba setup. 1758 */ 1759 shost->can_queue = phba->cfg_hba_queue_depth - 10; 1760 if (dev != &phba->pcidev->dev) { 1761 shost->transportt = lpfc_vport_transport_template; 1762 vport->port_type = LPFC_NPIV_PORT; 1763 } else { 1764 shost->transportt = lpfc_transport_template; 1765 vport->port_type = LPFC_PHYSICAL_PORT; 1766 } 1767 1768 /* Initialize all internally managed lists. */ 1769 INIT_LIST_HEAD(&vport->fc_nodes); 1770 spin_lock_init(&vport->work_port_lock); 1771 1772 init_timer(&vport->fc_disctmo); 1773 vport->fc_disctmo.function = lpfc_disc_timeout; 1774 vport->fc_disctmo.data = (unsigned long)vport; 1775 1776 init_timer(&vport->fc_fdmitmo); 1777 vport->fc_fdmitmo.function = lpfc_fdmi_tmo; 1778 vport->fc_fdmitmo.data = (unsigned long)vport; 1779 1780 init_timer(&vport->els_tmofunc); 1781 vport->els_tmofunc.function = lpfc_els_timeout; 1782 vport->els_tmofunc.data = (unsigned long)vport; 1783 1784 error = scsi_add_host(shost, dev); 1785 if (error) 1786 goto out_put_shost; 1787 1788 spin_lock_irq(&phba->hbalock); 1789 list_add_tail(&vport->listentry, &phba->port_list); 1790 spin_unlock_irq(&phba->hbalock); 1791 return vport; 1792 1793 out_put_shost: 1794 scsi_host_put(shost); 1795 out: 1796 return NULL; 1797 } 1798 1799 void 1800 destroy_port(struct lpfc_vport *vport) 1801 { 1802 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1803 struct lpfc_hba *phba = vport->phba; 1804 1805 kfree(vport->vname); 1806 1807 lpfc_debugfs_terminate(vport); 1808 fc_remove_host(shost); 1809 scsi_remove_host(shost); 1810 1811 spin_lock_irq(&phba->hbalock); 1812 list_del_init(&vport->listentry); 1813 spin_unlock_irq(&phba->hbalock); 1814 1815 lpfc_cleanup(vport); 1816 return; 1817 } 1818 1819 int 1820 lpfc_get_instance(void) 1821 { 1822 int instance = 0; 1823 1824 /* Assign an unused number */ 1825 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL)) 1826 return -1; 1827 if (idr_get_new(&lpfc_hba_index, NULL, &instance)) 1828 return -1; 1829 return instance; 1830 } 1831 1832 /* 1833 * Note: there is no scan_start function as adapter initialization 1834 * will have asynchronously kicked off the link initialization. 1835 */ 1836 1837 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 1838 { 1839 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1840 struct lpfc_hba *phba = vport->phba; 1841 int stat = 0; 1842 1843 spin_lock_irq(shost->host_lock); 1844 1845 if (vport->load_flag & FC_UNLOADING) { 1846 stat = 1; 1847 goto finished; 1848 } 1849 if (time >= 30 * HZ) { 1850 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1851 "0461 Scanning longer than 30 " 1852 "seconds. Continuing initialization\n"); 1853 stat = 1; 1854 goto finished; 1855 } 1856 if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) { 1857 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1858 "0465 Link down longer than 15 " 1859 "seconds. Continuing initialization\n"); 1860 stat = 1; 1861 goto finished; 1862 } 1863 1864 if (vport->port_state != LPFC_VPORT_READY) 1865 goto finished; 1866 if (vport->num_disc_nodes || vport->fc_prli_sent) 1867 goto finished; 1868 if (vport->fc_map_cnt == 0 && time < 2 * HZ) 1869 goto finished; 1870 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 1871 goto finished; 1872 1873 stat = 1; 1874 1875 finished: 1876 spin_unlock_irq(shost->host_lock); 1877 return stat; 1878 } 1879 1880 void lpfc_host_attrib_init(struct Scsi_Host *shost) 1881 { 1882 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1883 struct lpfc_hba *phba = vport->phba; 1884 /* 1885 * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). 1886 */ 1887 1888 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 1889 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 1890 fc_host_supported_classes(shost) = FC_COS_CLASS3; 1891 1892 memset(fc_host_supported_fc4s(shost), 0, 1893 sizeof(fc_host_supported_fc4s(shost))); 1894 fc_host_supported_fc4s(shost)[2] = 1; 1895 fc_host_supported_fc4s(shost)[7] = 1; 1896 1897 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 1898 sizeof fc_host_symbolic_name(shost)); 1899 1900 fc_host_supported_speeds(shost) = 0; 1901 if (phba->lmt & LMT_10Gb) 1902 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 1903 if (phba->lmt & LMT_8Gb) 1904 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 1905 if (phba->lmt & LMT_4Gb) 1906 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 1907 if (phba->lmt & LMT_2Gb) 1908 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 1909 if (phba->lmt & LMT_1Gb) 1910 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 1911 1912 fc_host_maxframe_size(shost) = 1913 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 1914 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 1915 1916 /* This value is also unchanging */ 1917 memset(fc_host_active_fc4s(shost), 0, 1918 sizeof(fc_host_active_fc4s(shost))); 1919 fc_host_active_fc4s(shost)[2] = 1; 1920 fc_host_active_fc4s(shost)[7] = 1; 1921 1922 fc_host_max_npiv_vports(shost) = phba->max_vpi; 1923 spin_lock_irq(shost->host_lock); 1924 vport->load_flag &= ~FC_LOADING; 1925 spin_unlock_irq(shost->host_lock); 1926 } 1927 1928 static int 1929 lpfc_enable_msix(struct lpfc_hba *phba) 1930 { 1931 int error; 1932 1933 phba->msix_entries[0].entry = 0; 1934 phba->msix_entries[0].vector = 0; 1935 1936 error = pci_enable_msix(phba->pcidev, phba->msix_entries, 1937 ARRAY_SIZE(phba->msix_entries)); 1938 if (error) { 1939 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1940 "0420 Enable MSI-X failed (%d), continuing " 1941 "with MSI\n", error); 1942 pci_disable_msix(phba->pcidev); 1943 return error; 1944 } 1945 1946 error = request_irq(phba->msix_entries[0].vector, lpfc_intr_handler, 0, 1947 LPFC_DRIVER_NAME, phba); 1948 if (error) { 1949 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1950 "0421 MSI-X request_irq failed (%d), " 1951 "continuing with MSI\n", error); 1952 pci_disable_msix(phba->pcidev); 1953 } 1954 return error; 1955 } 1956 1957 static void 1958 lpfc_disable_msix(struct lpfc_hba *phba) 1959 { 1960 free_irq(phba->msix_entries[0].vector, phba); 1961 pci_disable_msix(phba->pcidev); 1962 } 1963 1964 static int __devinit 1965 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 1966 { 1967 struct lpfc_vport *vport = NULL; 1968 struct lpfc_hba *phba; 1969 struct lpfc_sli *psli; 1970 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 1971 struct Scsi_Host *shost = NULL; 1972 void *ptr; 1973 unsigned long bar0map_len, bar2map_len; 1974 int error = -ENODEV, retval; 1975 int i, hbq_count; 1976 uint16_t iotag; 1977 int bars = pci_select_bars(pdev, IORESOURCE_MEM); 1978 1979 if (pci_enable_device_mem(pdev)) 1980 goto out; 1981 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME)) 1982 goto out_disable_device; 1983 1984 phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL); 1985 if (!phba) 1986 goto out_release_regions; 1987 1988 spin_lock_init(&phba->hbalock); 1989 1990 /* Initialize ndlp management spinlock */ 1991 spin_lock_init(&phba->ndlp_lock); 1992 1993 phba->pcidev = pdev; 1994 1995 /* Assign an unused board number */ 1996 if ((phba->brd_no = lpfc_get_instance()) < 0) 1997 goto out_free_phba; 1998 1999 INIT_LIST_HEAD(&phba->port_list); 2000 /* 2001 * Get all the module params for configuring this host and then 2002 * establish the host. 2003 */ 2004 lpfc_get_cfgparam(phba); 2005 phba->max_vpi = LPFC_MAX_VPI; 2006 2007 /* Initialize timers used by driver */ 2008 init_timer(&phba->fc_estabtmo); 2009 phba->fc_estabtmo.function = lpfc_establish_link_tmo; 2010 phba->fc_estabtmo.data = (unsigned long)phba; 2011 2012 init_timer(&phba->hb_tmofunc); 2013 phba->hb_tmofunc.function = lpfc_hb_timeout; 2014 phba->hb_tmofunc.data = (unsigned long)phba; 2015 2016 psli = &phba->sli; 2017 init_timer(&psli->mbox_tmo); 2018 psli->mbox_tmo.function = lpfc_mbox_timeout; 2019 psli->mbox_tmo.data = (unsigned long) phba; 2020 init_timer(&phba->fcp_poll_timer); 2021 phba->fcp_poll_timer.function = lpfc_poll_timeout; 2022 phba->fcp_poll_timer.data = (unsigned long) phba; 2023 init_timer(&phba->fabric_block_timer); 2024 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 2025 phba->fabric_block_timer.data = (unsigned long) phba; 2026 2027 pci_set_master(pdev); 2028 pci_try_set_mwi(pdev); 2029 2030 if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0) 2031 if (pci_set_dma_mask(phba->pcidev, DMA_32BIT_MASK) != 0) 2032 goto out_idr_remove; 2033 2034 /* 2035 * Get the bus address of Bar0 and Bar2 and the number of bytes 2036 * required by each mapping. 2037 */ 2038 phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0); 2039 bar0map_len = pci_resource_len(phba->pcidev, 0); 2040 2041 phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2); 2042 bar2map_len = pci_resource_len(phba->pcidev, 2); 2043 2044 /* Map HBA SLIM to a kernel virtual address. */ 2045 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 2046 if (!phba->slim_memmap_p) { 2047 error = -ENODEV; 2048 dev_printk(KERN_ERR, &pdev->dev, 2049 "ioremap failed for SLIM memory.\n"); 2050 goto out_idr_remove; 2051 } 2052 2053 /* Map HBA Control Registers to a kernel virtual address. */ 2054 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 2055 if (!phba->ctrl_regs_memmap_p) { 2056 error = -ENODEV; 2057 dev_printk(KERN_ERR, &pdev->dev, 2058 "ioremap failed for HBA control registers.\n"); 2059 goto out_iounmap_slim; 2060 } 2061 2062 /* Allocate memory for SLI-2 structures */ 2063 phba->slim2p = dma_alloc_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE, 2064 &phba->slim2p_mapping, GFP_KERNEL); 2065 if (!phba->slim2p) 2066 goto out_iounmap; 2067 2068 memset(phba->slim2p, 0, SLI2_SLIM_SIZE); 2069 2070 phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev, 2071 lpfc_sli_hbq_size(), 2072 &phba->hbqslimp.phys, 2073 GFP_KERNEL); 2074 if (!phba->hbqslimp.virt) 2075 goto out_free_slim; 2076 2077 hbq_count = lpfc_sli_hbq_count(); 2078 ptr = phba->hbqslimp.virt; 2079 for (i = 0; i < hbq_count; ++i) { 2080 phba->hbqs[i].hbq_virt = ptr; 2081 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 2082 ptr += (lpfc_hbq_defs[i]->entry_count * 2083 sizeof(struct lpfc_hbq_entry)); 2084 } 2085 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 2086 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 2087 2088 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 2089 2090 /* Initialize the SLI Layer to run with lpfc HBAs. */ 2091 lpfc_sli_setup(phba); 2092 lpfc_sli_queue_setup(phba); 2093 2094 retval = lpfc_mem_alloc(phba); 2095 if (retval) { 2096 error = retval; 2097 goto out_free_hbqslimp; 2098 } 2099 2100 /* Initialize and populate the iocb list per host. */ 2101 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 2102 for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) { 2103 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 2104 if (iocbq_entry == NULL) { 2105 printk(KERN_ERR "%s: only allocated %d iocbs of " 2106 "expected %d count. Unloading driver.\n", 2107 __FUNCTION__, i, LPFC_IOCB_LIST_CNT); 2108 error = -ENOMEM; 2109 goto out_free_iocbq; 2110 } 2111 2112 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 2113 if (iotag == 0) { 2114 kfree (iocbq_entry); 2115 printk(KERN_ERR "%s: failed to allocate IOTAG. " 2116 "Unloading driver.\n", 2117 __FUNCTION__); 2118 error = -ENOMEM; 2119 goto out_free_iocbq; 2120 } 2121 2122 spin_lock_irq(&phba->hbalock); 2123 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 2124 phba->total_iocbq_bufs++; 2125 spin_unlock_irq(&phba->hbalock); 2126 } 2127 2128 /* Initialize HBA structure */ 2129 phba->fc_edtov = FF_DEF_EDTOV; 2130 phba->fc_ratov = FF_DEF_RATOV; 2131 phba->fc_altov = FF_DEF_ALTOV; 2132 phba->fc_arbtov = FF_DEF_ARBTOV; 2133 2134 INIT_LIST_HEAD(&phba->work_list); 2135 phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT); 2136 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 2137 2138 /* Startup the kernel thread for this host adapter. */ 2139 phba->worker_thread = kthread_run(lpfc_do_work, phba, 2140 "lpfc_worker_%d", phba->brd_no); 2141 if (IS_ERR(phba->worker_thread)) { 2142 error = PTR_ERR(phba->worker_thread); 2143 goto out_free_iocbq; 2144 } 2145 2146 /* Initialize the list of scsi buffers used by driver for scsi IO. */ 2147 spin_lock_init(&phba->scsi_buf_list_lock); 2148 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); 2149 2150 /* Initialize list of fabric iocbs */ 2151 INIT_LIST_HEAD(&phba->fabric_iocb_list); 2152 2153 /* Initialize list to save ELS buffers */ 2154 INIT_LIST_HEAD(&phba->elsbuf); 2155 2156 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 2157 if (!vport) 2158 goto out_kthread_stop; 2159 2160 shost = lpfc_shost_from_vport(vport); 2161 phba->pport = vport; 2162 lpfc_debugfs_initialize(vport); 2163 2164 pci_set_drvdata(pdev, shost); 2165 phba->intr_type = NONE; 2166 2167 if (phba->cfg_use_msi == 2) { 2168 error = lpfc_enable_msix(phba); 2169 if (!error) 2170 phba->intr_type = MSIX; 2171 } 2172 2173 /* Fallback to MSI if MSI-X initialization failed */ 2174 if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) { 2175 retval = pci_enable_msi(phba->pcidev); 2176 if (!retval) 2177 phba->intr_type = MSI; 2178 else 2179 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2180 "0452 Enable MSI failed, continuing " 2181 "with IRQ\n"); 2182 } 2183 2184 /* MSI-X is the only case the doesn't need to call request_irq */ 2185 if (phba->intr_type != MSIX) { 2186 retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, 2187 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 2188 if (retval) { 2189 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0451 Enable " 2190 "interrupt handler failed\n"); 2191 error = retval; 2192 goto out_disable_msi; 2193 } else if (phba->intr_type != MSI) 2194 phba->intr_type = INTx; 2195 } 2196 2197 phba->MBslimaddr = phba->slim_memmap_p; 2198 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 2199 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 2200 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 2201 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 2202 2203 if (lpfc_alloc_sysfs_attr(vport)) { 2204 error = -ENOMEM; 2205 goto out_free_irq; 2206 } 2207 2208 if (lpfc_sli_hba_setup(phba)) { 2209 error = -ENODEV; 2210 goto out_remove_device; 2211 } 2212 2213 /* 2214 * hba setup may have changed the hba_queue_depth so we need to adjust 2215 * the value of can_queue. 2216 */ 2217 shost->can_queue = phba->cfg_hba_queue_depth - 10; 2218 2219 lpfc_host_attrib_init(shost); 2220 2221 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 2222 spin_lock_irq(shost->host_lock); 2223 lpfc_poll_start_timer(phba); 2224 spin_unlock_irq(shost->host_lock); 2225 } 2226 2227 scsi_scan_host(shost); 2228 2229 return 0; 2230 2231 out_remove_device: 2232 lpfc_free_sysfs_attr(vport); 2233 spin_lock_irq(shost->host_lock); 2234 vport->load_flag |= FC_UNLOADING; 2235 spin_unlock_irq(shost->host_lock); 2236 out_free_irq: 2237 lpfc_stop_phba_timers(phba); 2238 phba->pport->work_port_events = 0; 2239 2240 if (phba->intr_type == MSIX) 2241 lpfc_disable_msix(phba); 2242 else 2243 free_irq(phba->pcidev->irq, phba); 2244 2245 out_disable_msi: 2246 if (phba->intr_type == MSI) 2247 pci_disable_msi(phba->pcidev); 2248 destroy_port(vport); 2249 out_kthread_stop: 2250 kthread_stop(phba->worker_thread); 2251 out_free_iocbq: 2252 list_for_each_entry_safe(iocbq_entry, iocbq_next, 2253 &phba->lpfc_iocb_list, list) { 2254 kfree(iocbq_entry); 2255 phba->total_iocbq_bufs--; 2256 } 2257 lpfc_mem_free(phba); 2258 out_free_hbqslimp: 2259 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt, 2260 phba->hbqslimp.phys); 2261 out_free_slim: 2262 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p, 2263 phba->slim2p_mapping); 2264 out_iounmap: 2265 iounmap(phba->ctrl_regs_memmap_p); 2266 out_iounmap_slim: 2267 iounmap(phba->slim_memmap_p); 2268 out_idr_remove: 2269 idr_remove(&lpfc_hba_index, phba->brd_no); 2270 out_free_phba: 2271 kfree(phba); 2272 out_release_regions: 2273 pci_release_selected_regions(pdev, bars); 2274 out_disable_device: 2275 pci_disable_device(pdev); 2276 out: 2277 pci_set_drvdata(pdev, NULL); 2278 if (shost) 2279 scsi_host_put(shost); 2280 return error; 2281 } 2282 2283 static void __devexit 2284 lpfc_pci_remove_one(struct pci_dev *pdev) 2285 { 2286 struct Scsi_Host *shost = pci_get_drvdata(pdev); 2287 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2288 struct lpfc_hba *phba = vport->phba; 2289 int bars = pci_select_bars(pdev, IORESOURCE_MEM); 2290 2291 spin_lock_irq(&phba->hbalock); 2292 vport->load_flag |= FC_UNLOADING; 2293 spin_unlock_irq(&phba->hbalock); 2294 2295 kfree(vport->vname); 2296 lpfc_free_sysfs_attr(vport); 2297 2298 kthread_stop(phba->worker_thread); 2299 2300 fc_remove_host(shost); 2301 scsi_remove_host(shost); 2302 lpfc_cleanup(vport); 2303 2304 /* 2305 * Bring down the SLI Layer. This step disable all interrupts, 2306 * clears the rings, discards all mailbox commands, and resets 2307 * the HBA. 2308 */ 2309 lpfc_sli_hba_down(phba); 2310 lpfc_sli_brdrestart(phba); 2311 2312 lpfc_stop_phba_timers(phba); 2313 spin_lock_irq(&phba->hbalock); 2314 list_del_init(&vport->listentry); 2315 spin_unlock_irq(&phba->hbalock); 2316 2317 lpfc_debugfs_terminate(vport); 2318 2319 if (phba->intr_type == MSIX) 2320 lpfc_disable_msix(phba); 2321 else { 2322 free_irq(phba->pcidev->irq, phba); 2323 if (phba->intr_type == MSI) 2324 pci_disable_msi(phba->pcidev); 2325 } 2326 2327 pci_set_drvdata(pdev, NULL); 2328 scsi_host_put(shost); 2329 2330 /* 2331 * Call scsi_free before mem_free since scsi bufs are released to their 2332 * corresponding pools here. 2333 */ 2334 lpfc_scsi_free(phba); 2335 lpfc_mem_free(phba); 2336 2337 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt, 2338 phba->hbqslimp.phys); 2339 2340 /* Free resources associated with SLI2 interface */ 2341 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 2342 phba->slim2p, phba->slim2p_mapping); 2343 2344 /* unmap adapter SLIM and Control Registers */ 2345 iounmap(phba->ctrl_regs_memmap_p); 2346 iounmap(phba->slim_memmap_p); 2347 2348 idr_remove(&lpfc_hba_index, phba->brd_no); 2349 2350 kfree(phba); 2351 2352 pci_release_selected_regions(pdev, bars); 2353 pci_disable_device(pdev); 2354 } 2355 2356 /** 2357 * lpfc_io_error_detected - called when PCI error is detected 2358 * @pdev: Pointer to PCI device 2359 * @state: The current pci conneection state 2360 * 2361 * This function is called after a PCI bus error affecting 2362 * this device has been detected. 2363 */ 2364 static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev, 2365 pci_channel_state_t state) 2366 { 2367 struct Scsi_Host *shost = pci_get_drvdata(pdev); 2368 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 2369 struct lpfc_sli *psli = &phba->sli; 2370 struct lpfc_sli_ring *pring; 2371 2372 if (state == pci_channel_io_perm_failure) 2373 return PCI_ERS_RESULT_DISCONNECT; 2374 2375 pci_disable_device(pdev); 2376 /* 2377 * There may be I/Os dropped by the firmware. 2378 * Error iocb (I/O) on txcmplq and let the SCSI layer 2379 * retry it after re-establishing link. 2380 */ 2381 pring = &psli->ring[psli->fcp_ring]; 2382 lpfc_sli_abort_iocb_ring(phba, pring); 2383 2384 if (phba->intr_type == MSIX) 2385 lpfc_disable_msix(phba); 2386 else { 2387 free_irq(phba->pcidev->irq, phba); 2388 if (phba->intr_type == MSI) 2389 pci_disable_msi(phba->pcidev); 2390 } 2391 2392 /* Request a slot reset. */ 2393 return PCI_ERS_RESULT_NEED_RESET; 2394 } 2395 2396 /** 2397 * lpfc_io_slot_reset - called after the pci bus has been reset. 2398 * @pdev: Pointer to PCI device 2399 * 2400 * Restart the card from scratch, as if from a cold-boot. 2401 */ 2402 static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) 2403 { 2404 struct Scsi_Host *shost = pci_get_drvdata(pdev); 2405 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 2406 struct lpfc_sli *psli = &phba->sli; 2407 2408 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 2409 if (pci_enable_device_mem(pdev)) { 2410 printk(KERN_ERR "lpfc: Cannot re-enable " 2411 "PCI device after reset.\n"); 2412 return PCI_ERS_RESULT_DISCONNECT; 2413 } 2414 2415 pci_set_master(pdev); 2416 2417 /* Re-establishing Link */ 2418 spin_lock_irq(shost->host_lock); 2419 phba->pport->fc_flag |= FC_ESTABLISH_LINK; 2420 spin_unlock_irq(shost->host_lock); 2421 2422 spin_lock_irq(&phba->hbalock); 2423 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 2424 spin_unlock_irq(&phba->hbalock); 2425 2426 2427 /* Take device offline; this will perform cleanup */ 2428 lpfc_offline(phba); 2429 lpfc_sli_brdrestart(phba); 2430 2431 return PCI_ERS_RESULT_RECOVERED; 2432 } 2433 2434 /** 2435 * lpfc_io_resume - called when traffic can start flowing again. 2436 * @pdev: Pointer to PCI device 2437 * 2438 * This callback is called when the error recovery driver tells us that 2439 * its OK to resume normal operation. 2440 */ 2441 static void lpfc_io_resume(struct pci_dev *pdev) 2442 { 2443 struct Scsi_Host *shost = pci_get_drvdata(pdev); 2444 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 2445 2446 if (lpfc_online(phba) == 0) { 2447 mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60); 2448 } 2449 } 2450 2451 static struct pci_device_id lpfc_id_table[] = { 2452 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, 2453 PCI_ANY_ID, PCI_ANY_ID, }, 2454 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY, 2455 PCI_ANY_ID, PCI_ANY_ID, }, 2456 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR, 2457 PCI_ANY_ID, PCI_ANY_ID, }, 2458 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS, 2459 PCI_ANY_ID, PCI_ANY_ID, }, 2460 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR, 2461 PCI_ANY_ID, PCI_ANY_ID, }, 2462 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY, 2463 PCI_ANY_ID, PCI_ANY_ID, }, 2464 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY, 2465 PCI_ANY_ID, PCI_ANY_ID, }, 2466 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY, 2467 PCI_ANY_ID, PCI_ANY_ID, }, 2468 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY, 2469 PCI_ANY_ID, PCI_ANY_ID, }, 2470 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE, 2471 PCI_ANY_ID, PCI_ANY_ID, }, 2472 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP, 2473 PCI_ANY_ID, PCI_ANY_ID, }, 2474 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP, 2475 PCI_ANY_ID, PCI_ANY_ID, }, 2476 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS, 2477 PCI_ANY_ID, PCI_ANY_ID, }, 2478 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP, 2479 PCI_ANY_ID, PCI_ANY_ID, }, 2480 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP, 2481 PCI_ANY_ID, PCI_ANY_ID, }, 2482 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID, 2483 PCI_ANY_ID, PCI_ANY_ID, }, 2484 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB, 2485 PCI_ANY_ID, PCI_ANY_ID, }, 2486 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR, 2487 PCI_ANY_ID, PCI_ANY_ID, }, 2488 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP, 2489 PCI_ANY_ID, PCI_ANY_ID, }, 2490 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP, 2491 PCI_ANY_ID, PCI_ANY_ID, }, 2492 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID, 2493 PCI_ANY_ID, PCI_ANY_ID, }, 2494 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB, 2495 PCI_ANY_ID, PCI_ANY_ID, }, 2496 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY, 2497 PCI_ANY_ID, PCI_ANY_ID, }, 2498 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101, 2499 PCI_ANY_ID, PCI_ANY_ID, }, 2500 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S, 2501 PCI_ANY_ID, PCI_ANY_ID, }, 2502 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S, 2503 PCI_ANY_ID, PCI_ANY_ID, }, 2504 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S, 2505 PCI_ANY_ID, PCI_ANY_ID, }, 2506 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT, 2507 PCI_ANY_ID, PCI_ANY_ID, }, 2508 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID, 2509 PCI_ANY_ID, PCI_ANY_ID, }, 2510 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB, 2511 PCI_ANY_ID, PCI_ANY_ID, }, 2512 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP, 2513 PCI_ANY_ID, PCI_ANY_ID, }, 2514 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP, 2515 PCI_ANY_ID, PCI_ANY_ID, }, 2516 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S, 2517 PCI_ANY_ID, PCI_ANY_ID, }, 2518 { 0 } 2519 }; 2520 2521 MODULE_DEVICE_TABLE(pci, lpfc_id_table); 2522 2523 static struct pci_error_handlers lpfc_err_handler = { 2524 .error_detected = lpfc_io_error_detected, 2525 .slot_reset = lpfc_io_slot_reset, 2526 .resume = lpfc_io_resume, 2527 }; 2528 2529 static struct pci_driver lpfc_driver = { 2530 .name = LPFC_DRIVER_NAME, 2531 .id_table = lpfc_id_table, 2532 .probe = lpfc_pci_probe_one, 2533 .remove = __devexit_p(lpfc_pci_remove_one), 2534 .err_handler = &lpfc_err_handler, 2535 }; 2536 2537 static int __init 2538 lpfc_init(void) 2539 { 2540 int error = 0; 2541 2542 printk(LPFC_MODULE_DESC "\n"); 2543 printk(LPFC_COPYRIGHT "\n"); 2544 2545 if (lpfc_enable_npiv) { 2546 lpfc_transport_functions.vport_create = lpfc_vport_create; 2547 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 2548 } 2549 lpfc_transport_template = 2550 fc_attach_transport(&lpfc_transport_functions); 2551 if (lpfc_transport_template == NULL) 2552 return -ENOMEM; 2553 if (lpfc_enable_npiv) { 2554 lpfc_vport_transport_template = 2555 fc_attach_transport(&lpfc_vport_transport_functions); 2556 if (lpfc_vport_transport_template == NULL) { 2557 fc_release_transport(lpfc_transport_template); 2558 return -ENOMEM; 2559 } 2560 } 2561 error = pci_register_driver(&lpfc_driver); 2562 if (error) { 2563 fc_release_transport(lpfc_transport_template); 2564 fc_release_transport(lpfc_vport_transport_template); 2565 } 2566 2567 return error; 2568 } 2569 2570 static void __exit 2571 lpfc_exit(void) 2572 { 2573 pci_unregister_driver(&lpfc_driver); 2574 fc_release_transport(lpfc_transport_template); 2575 if (lpfc_enable_npiv) 2576 fc_release_transport(lpfc_vport_transport_template); 2577 } 2578 2579 module_init(lpfc_init); 2580 module_exit(lpfc_exit); 2581 MODULE_LICENSE("GPL"); 2582 MODULE_DESCRIPTION(LPFC_MODULE_DESC); 2583 MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com"); 2584 MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 2585