1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 8 * * 9 * This program is free software; you can redistribute it and/or * 10 * modify it under the terms of version 2 of the GNU General * 11 * Public License as published by the Free Software Foundation. * 12 * This program is distributed in the hope that it will be useful. * 13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 17 * TO BE LEGALLY INVALID. See the GNU General Public License for * 18 * more details, a copy of which can be found in the file COPYING * 19 * included with this package. * 20 *******************************************************************/ 21 22 #include <linux/blkdev.h> 23 #include <linux/delay.h> 24 #include <linux/dma-mapping.h> 25 #include <linux/idr.h> 26 #include <linux/interrupt.h> 27 #include <linux/kthread.h> 28 #include <linux/pci.h> 29 #include <linux/spinlock.h> 30 #include <linux/ctype.h> 31 32 #include <scsi/scsi.h> 33 #include <scsi/scsi_device.h> 34 #include <scsi/scsi_host.h> 35 #include <scsi/scsi_transport_fc.h> 36 37 #include "lpfc_hw.h" 38 #include "lpfc_sli.h" 39 #include "lpfc_disc.h" 40 #include "lpfc_scsi.h" 41 #include "lpfc.h" 42 #include "lpfc_logmsg.h" 43 #include "lpfc_crtn.h" 44 #include "lpfc_vport.h" 45 #include "lpfc_version.h" 46 47 static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); 48 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 49 static int lpfc_post_rcv_buf(struct lpfc_hba *); 50 51 static struct scsi_transport_template *lpfc_transport_template = NULL; 52 static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 53 static DEFINE_IDR(lpfc_hba_index); 54 55 /************************************************************************/ 56 /* */ 57 /* lpfc_config_port_prep */ 58 /* This routine will do LPFC initialization prior to the */ 59 /* CONFIG_PORT mailbox command. This will be initialized */ 60 /* as a SLI layer callback routine. */ 61 /* This routine returns 0 on success or -ERESTART if it wants */ 62 /* the SLI layer to reset the HBA and try again. Any */ 63 /* other return value indicates an error. */ 64 /* */ 65 /************************************************************************/ 66 int 67 lpfc_config_port_prep(struct lpfc_hba *phba) 68 { 69 lpfc_vpd_t *vp = &phba->vpd; 70 int i = 0, rc; 71 LPFC_MBOXQ_t *pmb; 72 MAILBOX_t *mb; 73 char *lpfc_vpd_data = NULL; 74 uint16_t offset = 0; 75 static char licensed[56] = 76 "key unlock for use with gnu public licensed code only\0"; 77 static int init_key = 1; 78 79 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 80 if (!pmb) { 81 phba->link_state = LPFC_HBA_ERROR; 82 return -ENOMEM; 83 } 84 85 mb = &pmb->mb; 86 phba->link_state = LPFC_INIT_MBX_CMDS; 87 88 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 89 if (init_key) { 90 uint32_t *ptext = (uint32_t *) licensed; 91 92 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 93 *ptext = cpu_to_be32(*ptext); 94 init_key = 0; 95 } 96 97 lpfc_read_nv(phba, pmb); 98 memset((char*)mb->un.varRDnvp.rsvd3, 0, 99 sizeof (mb->un.varRDnvp.rsvd3)); 100 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, 101 sizeof (licensed)); 102 103 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 104 105 if (rc != MBX_SUCCESS) { 106 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 107 "0324 Config Port initialization " 108 "error, mbxCmd x%x READ_NVPARM, " 109 "mbxStatus x%x\n", 110 mb->mbxCommand, mb->mbxStatus); 111 mempool_free(pmb, phba->mbox_mem_pool); 112 return -ERESTART; 113 } 114 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, 115 sizeof(phba->wwnn)); 116 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, 117 sizeof(phba->wwpn)); 118 } 119 120 phba->sli3_options = 0x0; 121 122 /* Setup and issue mailbox READ REV command */ 123 lpfc_read_rev(phba, pmb); 124 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 125 if (rc != MBX_SUCCESS) { 126 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 127 "0439 Adapter failed to init, mbxCmd x%x " 128 "READ_REV, mbxStatus x%x\n", 129 mb->mbxCommand, mb->mbxStatus); 130 mempool_free( pmb, phba->mbox_mem_pool); 131 return -ERESTART; 132 } 133 134 135 /* 136 * The value of rr must be 1 since the driver set the cv field to 1. 137 * This setting requires the FW to set all revision fields. 138 */ 139 if (mb->un.varRdRev.rr == 0) { 140 vp->rev.rBit = 0; 141 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 142 "0440 Adapter failed to init, READ_REV has " 143 "missing revision information.\n"); 144 mempool_free(pmb, phba->mbox_mem_pool); 145 return -ERESTART; 146 } 147 148 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) 149 return -EINVAL; 150 151 /* Save information as VPD data */ 152 vp->rev.rBit = 1; 153 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t)); 154 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev; 155 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16); 156 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev; 157 memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16); 158 vp->rev.biuRev = mb->un.varRdRev.biuRev; 159 vp->rev.smRev = mb->un.varRdRev.smRev; 160 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev; 161 vp->rev.endecRev = mb->un.varRdRev.endecRev; 162 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh; 163 vp->rev.fcphLow = mb->un.varRdRev.fcphLow; 164 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh; 165 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow; 166 vp->rev.postKernRev = mb->un.varRdRev.postKernRev; 167 vp->rev.opFwRev = mb->un.varRdRev.opFwRev; 168 169 /* If the sli feature level is less then 9, we must 170 * tear down all RPIs and VPIs on link down if NPIV 171 * is enabled. 172 */ 173 if (vp->rev.feaLevelHigh < 9) 174 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN; 175 176 if (lpfc_is_LC_HBA(phba->pcidev->device)) 177 memcpy(phba->RandomData, (char *)&mb->un.varWords[24], 178 sizeof (phba->RandomData)); 179 180 /* Get adapter VPD information */ 181 pmb->context2 = kmalloc(DMP_RSP_SIZE, GFP_KERNEL); 182 if (!pmb->context2) 183 goto out_free_mbox; 184 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL); 185 if (!lpfc_vpd_data) 186 goto out_free_context2; 187 188 do { 189 lpfc_dump_mem(phba, pmb, offset); 190 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 191 192 if (rc != MBX_SUCCESS) { 193 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 194 "0441 VPD not present on adapter, " 195 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n", 196 mb->mbxCommand, mb->mbxStatus); 197 mb->un.varDmp.word_cnt = 0; 198 } 199 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) 200 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; 201 lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset, 202 mb->un.varDmp.word_cnt); 203 offset += mb->un.varDmp.word_cnt; 204 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); 205 lpfc_parse_vpd(phba, lpfc_vpd_data, offset); 206 207 kfree(lpfc_vpd_data); 208 out_free_context2: 209 kfree(pmb->context2); 210 out_free_mbox: 211 mempool_free(pmb, phba->mbox_mem_pool); 212 return 0; 213 } 214 215 /* Completion handler for config async event mailbox command. */ 216 static void 217 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 218 { 219 if (pmboxq->mb.mbxStatus == MBX_SUCCESS) 220 phba->temp_sensor_support = 1; 221 else 222 phba->temp_sensor_support = 0; 223 mempool_free(pmboxq, phba->mbox_mem_pool); 224 return; 225 } 226 227 /************************************************************************/ 228 /* */ 229 /* lpfc_config_port_post */ 230 /* This routine will do LPFC initialization after the */ 231 /* CONFIG_PORT mailbox command. This will be initialized */ 232 /* as a SLI layer callback routine. */ 233 /* This routine returns 0 on success. Any other return value */ 234 /* indicates an error. */ 235 /* */ 236 /************************************************************************/ 237 int 238 lpfc_config_port_post(struct lpfc_hba *phba) 239 { 240 struct lpfc_vport *vport = phba->pport; 241 LPFC_MBOXQ_t *pmb; 242 MAILBOX_t *mb; 243 struct lpfc_dmabuf *mp; 244 struct lpfc_sli *psli = &phba->sli; 245 uint32_t status, timeout; 246 int i, j; 247 int rc; 248 249 spin_lock_irq(&phba->hbalock); 250 /* 251 * If the Config port completed correctly the HBA is not 252 * over heated any more. 253 */ 254 if (phba->over_temp_state == HBA_OVER_TEMP) 255 phba->over_temp_state = HBA_NORMAL_TEMP; 256 spin_unlock_irq(&phba->hbalock); 257 258 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 259 if (!pmb) { 260 phba->link_state = LPFC_HBA_ERROR; 261 return -ENOMEM; 262 } 263 mb = &pmb->mb; 264 265 /* Get login parameters for NID. */ 266 lpfc_read_sparam(phba, pmb, 0); 267 pmb->vport = vport; 268 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 269 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 270 "0448 Adapter failed init, mbxCmd x%x " 271 "READ_SPARM mbxStatus x%x\n", 272 mb->mbxCommand, mb->mbxStatus); 273 phba->link_state = LPFC_HBA_ERROR; 274 mp = (struct lpfc_dmabuf *) pmb->context1; 275 mempool_free( pmb, phba->mbox_mem_pool); 276 lpfc_mbuf_free(phba, mp->virt, mp->phys); 277 kfree(mp); 278 return -EIO; 279 } 280 281 mp = (struct lpfc_dmabuf *) pmb->context1; 282 283 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); 284 lpfc_mbuf_free(phba, mp->virt, mp->phys); 285 kfree(mp); 286 pmb->context1 = NULL; 287 288 if (phba->cfg_soft_wwnn) 289 u64_to_wwn(phba->cfg_soft_wwnn, 290 vport->fc_sparam.nodeName.u.wwn); 291 if (phba->cfg_soft_wwpn) 292 u64_to_wwn(phba->cfg_soft_wwpn, 293 vport->fc_sparam.portName.u.wwn); 294 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 295 sizeof (struct lpfc_name)); 296 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 297 sizeof (struct lpfc_name)); 298 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 299 /* This should be consolidated into parse_vpd ? - mr */ 300 if (phba->SerialNumber[0] == 0) { 301 uint8_t *outptr; 302 303 outptr = &vport->fc_nodename.u.s.IEEE[0]; 304 for (i = 0; i < 12; i++) { 305 status = *outptr++; 306 j = ((status & 0xf0) >> 4); 307 if (j <= 9) 308 phba->SerialNumber[i] = 309 (char)((uint8_t) 0x30 + (uint8_t) j); 310 else 311 phba->SerialNumber[i] = 312 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 313 i++; 314 j = (status & 0xf); 315 if (j <= 9) 316 phba->SerialNumber[i] = 317 (char)((uint8_t) 0x30 + (uint8_t) j); 318 else 319 phba->SerialNumber[i] = 320 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 321 } 322 } 323 324 lpfc_read_config(phba, pmb); 325 pmb->vport = vport; 326 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 327 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 328 "0453 Adapter failed to init, mbxCmd x%x " 329 "READ_CONFIG, mbxStatus x%x\n", 330 mb->mbxCommand, mb->mbxStatus); 331 phba->link_state = LPFC_HBA_ERROR; 332 mempool_free( pmb, phba->mbox_mem_pool); 333 return -EIO; 334 } 335 336 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 337 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1)) 338 phba->cfg_hba_queue_depth = 339 mb->un.varRdConfig.max_xri + 1; 340 341 phba->lmt = mb->un.varRdConfig.lmt; 342 343 /* Get the default values for Model Name and Description */ 344 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 345 346 if ((phba->cfg_link_speed > LINK_SPEED_10G) 347 || ((phba->cfg_link_speed == LINK_SPEED_1G) 348 && !(phba->lmt & LMT_1Gb)) 349 || ((phba->cfg_link_speed == LINK_SPEED_2G) 350 && !(phba->lmt & LMT_2Gb)) 351 || ((phba->cfg_link_speed == LINK_SPEED_4G) 352 && !(phba->lmt & LMT_4Gb)) 353 || ((phba->cfg_link_speed == LINK_SPEED_8G) 354 && !(phba->lmt & LMT_8Gb)) 355 || ((phba->cfg_link_speed == LINK_SPEED_10G) 356 && !(phba->lmt & LMT_10Gb))) { 357 /* Reset link speed to auto */ 358 lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT, 359 "1302 Invalid speed for this board: " 360 "Reset link speed to auto: x%x\n", 361 phba->cfg_link_speed); 362 phba->cfg_link_speed = LINK_SPEED_AUTO; 363 } 364 365 phba->link_state = LPFC_LINK_DOWN; 366 367 /* Only process IOCBs on ELS ring till hba_state is READY */ 368 if (psli->ring[psli->extra_ring].cmdringaddr) 369 psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT; 370 if (psli->ring[psli->fcp_ring].cmdringaddr) 371 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT; 372 if (psli->ring[psli->next_ring].cmdringaddr) 373 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT; 374 375 /* Post receive buffers for desired rings */ 376 if (phba->sli_rev != 3) 377 lpfc_post_rcv_buf(phba); 378 379 /* Enable appropriate host interrupts */ 380 spin_lock_irq(&phba->hbalock); 381 status = readl(phba->HCregaddr); 382 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 383 if (psli->num_rings > 0) 384 status |= HC_R0INT_ENA; 385 if (psli->num_rings > 1) 386 status |= HC_R1INT_ENA; 387 if (psli->num_rings > 2) 388 status |= HC_R2INT_ENA; 389 if (psli->num_rings > 3) 390 status |= HC_R3INT_ENA; 391 392 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 393 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 394 status &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 395 396 writel(status, phba->HCregaddr); 397 readl(phba->HCregaddr); /* flush */ 398 spin_unlock_irq(&phba->hbalock); 399 400 /* 401 * Setup the ring 0 (els) timeout handler 402 */ 403 timeout = phba->fc_ratov << 1; 404 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); 405 mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 406 phba->hb_outstanding = 0; 407 phba->last_completion_time = jiffies; 408 409 lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed); 410 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 411 pmb->vport = vport; 412 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 413 lpfc_set_loopback_flag(phba); 414 if (rc != MBX_SUCCESS) { 415 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 416 "0454 Adapter failed to init, mbxCmd x%x " 417 "INIT_LINK, mbxStatus x%x\n", 418 mb->mbxCommand, mb->mbxStatus); 419 420 /* Clear all interrupt enable conditions */ 421 writel(0, phba->HCregaddr); 422 readl(phba->HCregaddr); /* flush */ 423 /* Clear all pending interrupts */ 424 writel(0xffffffff, phba->HAregaddr); 425 readl(phba->HAregaddr); /* flush */ 426 427 phba->link_state = LPFC_HBA_ERROR; 428 if (rc != MBX_BUSY) 429 mempool_free(pmb, phba->mbox_mem_pool); 430 return -EIO; 431 } 432 /* MBOX buffer will be freed in mbox compl */ 433 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 434 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 435 pmb->mbox_cmpl = lpfc_config_async_cmpl; 436 pmb->vport = phba->pport; 437 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 438 439 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 440 lpfc_printf_log(phba, 441 KERN_ERR, 442 LOG_INIT, 443 "0456 Adapter failed to issue " 444 "ASYNCEVT_ENABLE mbox status x%x \n.", 445 rc); 446 mempool_free(pmb, phba->mbox_mem_pool); 447 } 448 return (0); 449 } 450 451 /************************************************************************/ 452 /* */ 453 /* lpfc_hba_down_prep */ 454 /* This routine will do LPFC uninitialization before the */ 455 /* HBA is reset when bringing down the SLI Layer. This will be */ 456 /* initialized as a SLI layer callback routine. */ 457 /* This routine returns 0 on success. Any other return value */ 458 /* indicates an error. */ 459 /* */ 460 /************************************************************************/ 461 int 462 lpfc_hba_down_prep(struct lpfc_hba *phba) 463 { 464 struct lpfc_vport **vports; 465 int i; 466 /* Disable interrupts */ 467 writel(0, phba->HCregaddr); 468 readl(phba->HCregaddr); /* flush */ 469 470 if (phba->pport->load_flag & FC_UNLOADING) 471 lpfc_cleanup_discovery_resources(phba->pport); 472 else { 473 vports = lpfc_create_vport_work_array(phba); 474 if (vports != NULL) 475 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) 476 lpfc_cleanup_discovery_resources(vports[i]); 477 lpfc_destroy_vport_work_array(phba, vports); 478 } 479 return 0; 480 } 481 482 /************************************************************************/ 483 /* */ 484 /* lpfc_hba_down_post */ 485 /* This routine will do uninitialization after the HBA is reset */ 486 /* when bringing down the SLI Layer. */ 487 /* This routine returns 0 on success. Any other return value */ 488 /* indicates an error. */ 489 /* */ 490 /************************************************************************/ 491 int 492 lpfc_hba_down_post(struct lpfc_hba *phba) 493 { 494 struct lpfc_sli *psli = &phba->sli; 495 struct lpfc_sli_ring *pring; 496 struct lpfc_dmabuf *mp, *next_mp; 497 struct lpfc_iocbq *iocb; 498 IOCB_t *cmd = NULL; 499 LIST_HEAD(completions); 500 int i; 501 502 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 503 lpfc_sli_hbqbuf_free_all(phba); 504 else { 505 /* Cleanup preposted buffers on the ELS ring */ 506 pring = &psli->ring[LPFC_ELS_RING]; 507 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 508 list_del(&mp->list); 509 pring->postbufq_cnt--; 510 lpfc_mbuf_free(phba, mp->virt, mp->phys); 511 kfree(mp); 512 } 513 } 514 515 spin_lock_irq(&phba->hbalock); 516 for (i = 0; i < psli->num_rings; i++) { 517 pring = &psli->ring[i]; 518 519 /* At this point in time the HBA is either reset or DOA. Either 520 * way, nothing should be on txcmplq as it will NEVER complete. 521 */ 522 list_splice_init(&pring->txcmplq, &completions); 523 pring->txcmplq_cnt = 0; 524 spin_unlock_irq(&phba->hbalock); 525 526 while (!list_empty(&completions)) { 527 iocb = list_get_first(&completions, struct lpfc_iocbq, 528 list); 529 cmd = &iocb->iocb; 530 list_del_init(&iocb->list); 531 532 if (!iocb->iocb_cmpl) 533 lpfc_sli_release_iocbq(phba, iocb); 534 else { 535 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 536 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 537 (iocb->iocb_cmpl) (phba, iocb, iocb); 538 } 539 } 540 541 lpfc_sli_abort_iocb_ring(phba, pring); 542 spin_lock_irq(&phba->hbalock); 543 } 544 spin_unlock_irq(&phba->hbalock); 545 546 return 0; 547 } 548 549 /* HBA heart beat timeout handler */ 550 static void 551 lpfc_hb_timeout(unsigned long ptr) 552 { 553 struct lpfc_hba *phba; 554 unsigned long iflag; 555 556 phba = (struct lpfc_hba *)ptr; 557 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 558 if (!(phba->pport->work_port_events & WORKER_HB_TMO)) 559 phba->pport->work_port_events |= WORKER_HB_TMO; 560 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 561 562 spin_lock_irqsave(&phba->hbalock, iflag); 563 if (phba->work_wait) 564 wake_up(phba->work_wait); 565 spin_unlock_irqrestore(&phba->hbalock, iflag); 566 return; 567 } 568 569 static void 570 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 571 { 572 unsigned long drvr_flag; 573 574 spin_lock_irqsave(&phba->hbalock, drvr_flag); 575 phba->hb_outstanding = 0; 576 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 577 578 mempool_free(pmboxq, phba->mbox_mem_pool); 579 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && 580 !(phba->link_state == LPFC_HBA_ERROR) && 581 !(phba->pport->load_flag & FC_UNLOADING)) 582 mod_timer(&phba->hb_tmofunc, 583 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 584 return; 585 } 586 587 void 588 lpfc_hb_timeout_handler(struct lpfc_hba *phba) 589 { 590 LPFC_MBOXQ_t *pmboxq; 591 struct lpfc_dmabuf *buf_ptr; 592 int retval; 593 struct lpfc_sli *psli = &phba->sli; 594 LIST_HEAD(completions); 595 596 if ((phba->link_state == LPFC_HBA_ERROR) || 597 (phba->pport->load_flag & FC_UNLOADING) || 598 (phba->pport->fc_flag & FC_OFFLINE_MODE)) 599 return; 600 601 spin_lock_irq(&phba->pport->work_port_lock); 602 /* If the timer is already canceled do nothing */ 603 if (!(phba->pport->work_port_events & WORKER_HB_TMO)) { 604 spin_unlock_irq(&phba->pport->work_port_lock); 605 return; 606 } 607 608 if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ, 609 jiffies)) { 610 spin_unlock_irq(&phba->pport->work_port_lock); 611 if (!phba->hb_outstanding) 612 mod_timer(&phba->hb_tmofunc, 613 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 614 else 615 mod_timer(&phba->hb_tmofunc, 616 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); 617 return; 618 } 619 spin_unlock_irq(&phba->pport->work_port_lock); 620 621 if (phba->elsbuf_cnt && 622 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { 623 spin_lock_irq(&phba->hbalock); 624 list_splice_init(&phba->elsbuf, &completions); 625 phba->elsbuf_cnt = 0; 626 phba->elsbuf_prev_cnt = 0; 627 spin_unlock_irq(&phba->hbalock); 628 629 while (!list_empty(&completions)) { 630 list_remove_head(&completions, buf_ptr, 631 struct lpfc_dmabuf, list); 632 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 633 kfree(buf_ptr); 634 } 635 } 636 phba->elsbuf_prev_cnt = phba->elsbuf_cnt; 637 638 /* If there is no heart beat outstanding, issue a heartbeat command */ 639 if (phba->cfg_enable_hba_heartbeat) { 640 if (!phba->hb_outstanding) { 641 pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL); 642 if (!pmboxq) { 643 mod_timer(&phba->hb_tmofunc, 644 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 645 return; 646 } 647 648 lpfc_heart_beat(phba, pmboxq); 649 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; 650 pmboxq->vport = phba->pport; 651 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 652 653 if (retval != MBX_BUSY && retval != MBX_SUCCESS) { 654 mempool_free(pmboxq, phba->mbox_mem_pool); 655 mod_timer(&phba->hb_tmofunc, 656 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 657 return; 658 } 659 mod_timer(&phba->hb_tmofunc, 660 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); 661 phba->hb_outstanding = 1; 662 return; 663 } else { 664 /* 665 * If heart beat timeout called with hb_outstanding set 666 * we need to take the HBA offline. 667 */ 668 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 669 "0459 Adapter heartbeat failure, " 670 "taking this port offline.\n"); 671 672 spin_lock_irq(&phba->hbalock); 673 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 674 spin_unlock_irq(&phba->hbalock); 675 676 lpfc_offline_prep(phba); 677 lpfc_offline(phba); 678 lpfc_unblock_mgmt_io(phba); 679 phba->link_state = LPFC_HBA_ERROR; 680 lpfc_hba_down_post(phba); 681 } 682 } 683 } 684 685 static void 686 lpfc_offline_eratt(struct lpfc_hba *phba) 687 { 688 struct lpfc_sli *psli = &phba->sli; 689 690 spin_lock_irq(&phba->hbalock); 691 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 692 spin_unlock_irq(&phba->hbalock); 693 lpfc_offline_prep(phba); 694 695 lpfc_offline(phba); 696 lpfc_reset_barrier(phba); 697 lpfc_sli_brdreset(phba); 698 lpfc_hba_down_post(phba); 699 lpfc_sli_brdready(phba, HS_MBRDY); 700 lpfc_unblock_mgmt_io(phba); 701 phba->link_state = LPFC_HBA_ERROR; 702 return; 703 } 704 705 /************************************************************************/ 706 /* */ 707 /* lpfc_handle_eratt */ 708 /* This routine will handle processing a Host Attention */ 709 /* Error Status event. This will be initialized */ 710 /* as a SLI layer callback routine. */ 711 /* */ 712 /************************************************************************/ 713 void 714 lpfc_handle_eratt(struct lpfc_hba *phba) 715 { 716 struct lpfc_vport *vport = phba->pport; 717 struct lpfc_sli *psli = &phba->sli; 718 struct lpfc_sli_ring *pring; 719 uint32_t event_data; 720 unsigned long temperature; 721 struct temp_event temp_event_data; 722 struct Scsi_Host *shost; 723 724 /* If the pci channel is offline, ignore possible errors, 725 * since we cannot communicate with the pci card anyway. */ 726 if (pci_channel_offline(phba->pcidev)) 727 return; 728 /* If resets are disabled then leave the HBA alone and return */ 729 if (!phba->cfg_enable_hba_reset) 730 return; 731 732 if (phba->work_hs & HS_FFER6 || 733 phba->work_hs & HS_FFER5) { 734 /* Re-establishing Link */ 735 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 736 "1301 Re-establishing Link " 737 "Data: x%x x%x x%x\n", 738 phba->work_hs, 739 phba->work_status[0], phba->work_status[1]); 740 741 spin_lock_irq(&phba->hbalock); 742 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 743 spin_unlock_irq(&phba->hbalock); 744 745 /* 746 * Firmware stops when it triggled erratt with HS_FFER6. 747 * That could cause the I/Os dropped by the firmware. 748 * Error iocb (I/O) on txcmplq and let the SCSI layer 749 * retry it after re-establishing link. 750 */ 751 pring = &psli->ring[psli->fcp_ring]; 752 lpfc_sli_abort_iocb_ring(phba, pring); 753 754 /* 755 * There was a firmware error. Take the hba offline and then 756 * attempt to restart it. 757 */ 758 lpfc_offline_prep(phba); 759 lpfc_offline(phba); 760 lpfc_sli_brdrestart(phba); 761 if (lpfc_online(phba) == 0) { /* Initialize the HBA */ 762 lpfc_unblock_mgmt_io(phba); 763 return; 764 } 765 lpfc_unblock_mgmt_io(phba); 766 } else if (phba->work_hs & HS_CRIT_TEMP) { 767 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET); 768 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 769 temp_event_data.event_code = LPFC_CRIT_TEMP; 770 temp_event_data.data = (uint32_t)temperature; 771 772 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 773 "0459 Adapter maximum temperature exceeded " 774 "(%ld), taking this port offline " 775 "Data: x%x x%x x%x\n", 776 temperature, phba->work_hs, 777 phba->work_status[0], phba->work_status[1]); 778 779 shost = lpfc_shost_from_vport(phba->pport); 780 fc_host_post_vendor_event(shost, fc_get_event_number(), 781 sizeof(temp_event_data), 782 (char *) &temp_event_data, 783 SCSI_NL_VID_TYPE_PCI 784 | PCI_VENDOR_ID_EMULEX); 785 786 spin_lock_irq(&phba->hbalock); 787 phba->over_temp_state = HBA_OVER_TEMP; 788 spin_unlock_irq(&phba->hbalock); 789 lpfc_offline_eratt(phba); 790 791 } else { 792 /* The if clause above forces this code path when the status 793 * failure is a value other than FFER6. Do not call the offline 794 * twice. This is the adapter hardware error path. 795 */ 796 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 797 "0457 Adapter Hardware Error " 798 "Data: x%x x%x x%x\n", 799 phba->work_hs, 800 phba->work_status[0], phba->work_status[1]); 801 802 event_data = FC_REG_DUMP_EVENT; 803 shost = lpfc_shost_from_vport(vport); 804 fc_host_post_vendor_event(shost, fc_get_event_number(), 805 sizeof(event_data), (char *) &event_data, 806 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 807 808 lpfc_offline_eratt(phba); 809 } 810 } 811 812 /************************************************************************/ 813 /* */ 814 /* lpfc_handle_latt */ 815 /* This routine will handle processing a Host Attention */ 816 /* Link Status event. This will be initialized */ 817 /* as a SLI layer callback routine. */ 818 /* */ 819 /************************************************************************/ 820 void 821 lpfc_handle_latt(struct lpfc_hba *phba) 822 { 823 struct lpfc_vport *vport = phba->pport; 824 struct lpfc_sli *psli = &phba->sli; 825 LPFC_MBOXQ_t *pmb; 826 volatile uint32_t control; 827 struct lpfc_dmabuf *mp; 828 int rc = 0; 829 830 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 831 if (!pmb) { 832 rc = 1; 833 goto lpfc_handle_latt_err_exit; 834 } 835 836 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 837 if (!mp) { 838 rc = 2; 839 goto lpfc_handle_latt_free_pmb; 840 } 841 842 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 843 if (!mp->virt) { 844 rc = 3; 845 goto lpfc_handle_latt_free_mp; 846 } 847 848 /* Cleanup any outstanding ELS commands */ 849 lpfc_els_flush_all_cmd(phba); 850 851 psli->slistat.link_event++; 852 lpfc_read_la(phba, pmb, mp); 853 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la; 854 pmb->vport = vport; 855 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 856 if (rc == MBX_NOT_FINISHED) { 857 rc = 4; 858 goto lpfc_handle_latt_free_mbuf; 859 } 860 861 /* Clear Link Attention in HA REG */ 862 spin_lock_irq(&phba->hbalock); 863 writel(HA_LATT, phba->HAregaddr); 864 readl(phba->HAregaddr); /* flush */ 865 spin_unlock_irq(&phba->hbalock); 866 867 return; 868 869 lpfc_handle_latt_free_mbuf: 870 lpfc_mbuf_free(phba, mp->virt, mp->phys); 871 lpfc_handle_latt_free_mp: 872 kfree(mp); 873 lpfc_handle_latt_free_pmb: 874 mempool_free(pmb, phba->mbox_mem_pool); 875 lpfc_handle_latt_err_exit: 876 /* Enable Link attention interrupts */ 877 spin_lock_irq(&phba->hbalock); 878 psli->sli_flag |= LPFC_PROCESS_LA; 879 control = readl(phba->HCregaddr); 880 control |= HC_LAINT_ENA; 881 writel(control, phba->HCregaddr); 882 readl(phba->HCregaddr); /* flush */ 883 884 /* Clear Link Attention in HA REG */ 885 writel(HA_LATT, phba->HAregaddr); 886 readl(phba->HAregaddr); /* flush */ 887 spin_unlock_irq(&phba->hbalock); 888 lpfc_linkdown(phba); 889 phba->link_state = LPFC_HBA_ERROR; 890 891 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 892 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 893 894 return; 895 } 896 897 /************************************************************************/ 898 /* */ 899 /* lpfc_parse_vpd */ 900 /* This routine will parse the VPD data */ 901 /* */ 902 /************************************************************************/ 903 static int 904 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 905 { 906 uint8_t lenlo, lenhi; 907 int Length; 908 int i, j; 909 int finished = 0; 910 int index = 0; 911 912 if (!vpd) 913 return 0; 914 915 /* Vital Product */ 916 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 917 "0455 Vital Product Data: x%x x%x x%x x%x\n", 918 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 919 (uint32_t) vpd[3]); 920 while (!finished && (index < (len - 4))) { 921 switch (vpd[index]) { 922 case 0x82: 923 case 0x91: 924 index += 1; 925 lenlo = vpd[index]; 926 index += 1; 927 lenhi = vpd[index]; 928 index += 1; 929 i = ((((unsigned short)lenhi) << 8) + lenlo); 930 index += i; 931 break; 932 case 0x90: 933 index += 1; 934 lenlo = vpd[index]; 935 index += 1; 936 lenhi = vpd[index]; 937 index += 1; 938 Length = ((((unsigned short)lenhi) << 8) + lenlo); 939 if (Length > len - index) 940 Length = len - index; 941 while (Length > 0) { 942 /* Look for Serial Number */ 943 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 944 index += 2; 945 i = vpd[index]; 946 index += 1; 947 j = 0; 948 Length -= (3+i); 949 while(i--) { 950 phba->SerialNumber[j++] = vpd[index++]; 951 if (j == 31) 952 break; 953 } 954 phba->SerialNumber[j] = 0; 955 continue; 956 } 957 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 958 phba->vpd_flag |= VPD_MODEL_DESC; 959 index += 2; 960 i = vpd[index]; 961 index += 1; 962 j = 0; 963 Length -= (3+i); 964 while(i--) { 965 phba->ModelDesc[j++] = vpd[index++]; 966 if (j == 255) 967 break; 968 } 969 phba->ModelDesc[j] = 0; 970 continue; 971 } 972 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 973 phba->vpd_flag |= VPD_MODEL_NAME; 974 index += 2; 975 i = vpd[index]; 976 index += 1; 977 j = 0; 978 Length -= (3+i); 979 while(i--) { 980 phba->ModelName[j++] = vpd[index++]; 981 if (j == 79) 982 break; 983 } 984 phba->ModelName[j] = 0; 985 continue; 986 } 987 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 988 phba->vpd_flag |= VPD_PROGRAM_TYPE; 989 index += 2; 990 i = vpd[index]; 991 index += 1; 992 j = 0; 993 Length -= (3+i); 994 while(i--) { 995 phba->ProgramType[j++] = vpd[index++]; 996 if (j == 255) 997 break; 998 } 999 phba->ProgramType[j] = 0; 1000 continue; 1001 } 1002 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 1003 phba->vpd_flag |= VPD_PORT; 1004 index += 2; 1005 i = vpd[index]; 1006 index += 1; 1007 j = 0; 1008 Length -= (3+i); 1009 while(i--) { 1010 phba->Port[j++] = vpd[index++]; 1011 if (j == 19) 1012 break; 1013 } 1014 phba->Port[j] = 0; 1015 continue; 1016 } 1017 else { 1018 index += 2; 1019 i = vpd[index]; 1020 index += 1; 1021 index += i; 1022 Length -= (3 + i); 1023 } 1024 } 1025 finished = 0; 1026 break; 1027 case 0x78: 1028 finished = 1; 1029 break; 1030 default: 1031 index ++; 1032 break; 1033 } 1034 } 1035 1036 return(1); 1037 } 1038 1039 static void 1040 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 1041 { 1042 lpfc_vpd_t *vp; 1043 uint16_t dev_id = phba->pcidev->device; 1044 int max_speed; 1045 struct { 1046 char * name; 1047 int max_speed; 1048 char * bus; 1049 } m = {"<Unknown>", 0, ""}; 1050 1051 if (mdp && mdp[0] != '\0' 1052 && descp && descp[0] != '\0') 1053 return; 1054 1055 if (phba->lmt & LMT_10Gb) 1056 max_speed = 10; 1057 else if (phba->lmt & LMT_8Gb) 1058 max_speed = 8; 1059 else if (phba->lmt & LMT_4Gb) 1060 max_speed = 4; 1061 else if (phba->lmt & LMT_2Gb) 1062 max_speed = 2; 1063 else 1064 max_speed = 1; 1065 1066 vp = &phba->vpd; 1067 1068 switch (dev_id) { 1069 case PCI_DEVICE_ID_FIREFLY: 1070 m = (typeof(m)){"LP6000", max_speed, "PCI"}; 1071 break; 1072 case PCI_DEVICE_ID_SUPERFLY: 1073 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 1074 m = (typeof(m)){"LP7000", max_speed, "PCI"}; 1075 else 1076 m = (typeof(m)){"LP7000E", max_speed, "PCI"}; 1077 break; 1078 case PCI_DEVICE_ID_DRAGONFLY: 1079 m = (typeof(m)){"LP8000", max_speed, "PCI"}; 1080 break; 1081 case PCI_DEVICE_ID_CENTAUR: 1082 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 1083 m = (typeof(m)){"LP9002", max_speed, "PCI"}; 1084 else 1085 m = (typeof(m)){"LP9000", max_speed, "PCI"}; 1086 break; 1087 case PCI_DEVICE_ID_RFLY: 1088 m = (typeof(m)){"LP952", max_speed, "PCI"}; 1089 break; 1090 case PCI_DEVICE_ID_PEGASUS: 1091 m = (typeof(m)){"LP9802", max_speed, "PCI-X"}; 1092 break; 1093 case PCI_DEVICE_ID_THOR: 1094 m = (typeof(m)){"LP10000", max_speed, "PCI-X"}; 1095 break; 1096 case PCI_DEVICE_ID_VIPER: 1097 m = (typeof(m)){"LPX1000", max_speed, "PCI-X"}; 1098 break; 1099 case PCI_DEVICE_ID_PFLY: 1100 m = (typeof(m)){"LP982", max_speed, "PCI-X"}; 1101 break; 1102 case PCI_DEVICE_ID_TFLY: 1103 m = (typeof(m)){"LP1050", max_speed, "PCI-X"}; 1104 break; 1105 case PCI_DEVICE_ID_HELIOS: 1106 m = (typeof(m)){"LP11000", max_speed, "PCI-X2"}; 1107 break; 1108 case PCI_DEVICE_ID_HELIOS_SCSP: 1109 m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"}; 1110 break; 1111 case PCI_DEVICE_ID_HELIOS_DCSP: 1112 m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"}; 1113 break; 1114 case PCI_DEVICE_ID_NEPTUNE: 1115 m = (typeof(m)){"LPe1000", max_speed, "PCIe"}; 1116 break; 1117 case PCI_DEVICE_ID_NEPTUNE_SCSP: 1118 m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"}; 1119 break; 1120 case PCI_DEVICE_ID_NEPTUNE_DCSP: 1121 m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"}; 1122 break; 1123 case PCI_DEVICE_ID_BMID: 1124 m = (typeof(m)){"LP1150", max_speed, "PCI-X2"}; 1125 break; 1126 case PCI_DEVICE_ID_BSMB: 1127 m = (typeof(m)){"LP111", max_speed, "PCI-X2"}; 1128 break; 1129 case PCI_DEVICE_ID_ZEPHYR: 1130 m = (typeof(m)){"LPe11000", max_speed, "PCIe"}; 1131 break; 1132 case PCI_DEVICE_ID_ZEPHYR_SCSP: 1133 m = (typeof(m)){"LPe11000", max_speed, "PCIe"}; 1134 break; 1135 case PCI_DEVICE_ID_ZEPHYR_DCSP: 1136 m = (typeof(m)){"LPe11002-SP", max_speed, "PCIe"}; 1137 break; 1138 case PCI_DEVICE_ID_ZMID: 1139 m = (typeof(m)){"LPe1150", max_speed, "PCIe"}; 1140 break; 1141 case PCI_DEVICE_ID_ZSMB: 1142 m = (typeof(m)){"LPe111", max_speed, "PCIe"}; 1143 break; 1144 case PCI_DEVICE_ID_LP101: 1145 m = (typeof(m)){"LP101", max_speed, "PCI-X"}; 1146 break; 1147 case PCI_DEVICE_ID_LP10000S: 1148 m = (typeof(m)){"LP10000-S", max_speed, "PCI"}; 1149 break; 1150 case PCI_DEVICE_ID_LP11000S: 1151 m = (typeof(m)){"LP11000-S", max_speed, 1152 "PCI-X2"}; 1153 break; 1154 case PCI_DEVICE_ID_LPE11000S: 1155 m = (typeof(m)){"LPe11000-S", max_speed, 1156 "PCIe"}; 1157 break; 1158 case PCI_DEVICE_ID_SAT: 1159 m = (typeof(m)){"LPe12000", max_speed, "PCIe"}; 1160 break; 1161 case PCI_DEVICE_ID_SAT_MID: 1162 m = (typeof(m)){"LPe1250", max_speed, "PCIe"}; 1163 break; 1164 case PCI_DEVICE_ID_SAT_SMB: 1165 m = (typeof(m)){"LPe121", max_speed, "PCIe"}; 1166 break; 1167 case PCI_DEVICE_ID_SAT_DCSP: 1168 m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"}; 1169 break; 1170 case PCI_DEVICE_ID_SAT_SCSP: 1171 m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"}; 1172 break; 1173 case PCI_DEVICE_ID_SAT_S: 1174 m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"}; 1175 break; 1176 default: 1177 m = (typeof(m)){ NULL }; 1178 break; 1179 } 1180 1181 if (mdp && mdp[0] == '\0') 1182 snprintf(mdp, 79,"%s", m.name); 1183 if (descp && descp[0] == '\0') 1184 snprintf(descp, 255, 1185 "Emulex %s %dGb %s Fibre Channel Adapter", 1186 m.name, m.max_speed, m.bus); 1187 } 1188 1189 /**************************************************/ 1190 /* lpfc_post_buffer */ 1191 /* */ 1192 /* This routine will post count buffers to the */ 1193 /* ring with the QUE_RING_BUF_CN command. This */ 1194 /* allows 3 buffers / command to be posted. */ 1195 /* Returns the number of buffers NOT posted. */ 1196 /**************************************************/ 1197 int 1198 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt, 1199 int type) 1200 { 1201 IOCB_t *icmd; 1202 struct lpfc_iocbq *iocb; 1203 struct lpfc_dmabuf *mp1, *mp2; 1204 1205 cnt += pring->missbufcnt; 1206 1207 /* While there are buffers to post */ 1208 while (cnt > 0) { 1209 /* Allocate buffer for command iocb */ 1210 iocb = lpfc_sli_get_iocbq(phba); 1211 if (iocb == NULL) { 1212 pring->missbufcnt = cnt; 1213 return cnt; 1214 } 1215 icmd = &iocb->iocb; 1216 1217 /* 2 buffers can be posted per command */ 1218 /* Allocate buffer to post */ 1219 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 1220 if (mp1) 1221 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 1222 if (!mp1 || !mp1->virt) { 1223 kfree(mp1); 1224 lpfc_sli_release_iocbq(phba, iocb); 1225 pring->missbufcnt = cnt; 1226 return cnt; 1227 } 1228 1229 INIT_LIST_HEAD(&mp1->list); 1230 /* Allocate buffer to post */ 1231 if (cnt > 1) { 1232 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 1233 if (mp2) 1234 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 1235 &mp2->phys); 1236 if (!mp2 || !mp2->virt) { 1237 kfree(mp2); 1238 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1239 kfree(mp1); 1240 lpfc_sli_release_iocbq(phba, iocb); 1241 pring->missbufcnt = cnt; 1242 return cnt; 1243 } 1244 1245 INIT_LIST_HEAD(&mp2->list); 1246 } else { 1247 mp2 = NULL; 1248 } 1249 1250 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 1251 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 1252 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 1253 icmd->ulpBdeCount = 1; 1254 cnt--; 1255 if (mp2) { 1256 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 1257 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 1258 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 1259 cnt--; 1260 icmd->ulpBdeCount = 2; 1261 } 1262 1263 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 1264 icmd->ulpLe = 1; 1265 1266 if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) { 1267 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1268 kfree(mp1); 1269 cnt++; 1270 if (mp2) { 1271 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 1272 kfree(mp2); 1273 cnt++; 1274 } 1275 lpfc_sli_release_iocbq(phba, iocb); 1276 pring->missbufcnt = cnt; 1277 return cnt; 1278 } 1279 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 1280 if (mp2) 1281 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 1282 } 1283 pring->missbufcnt = 0; 1284 return 0; 1285 } 1286 1287 /************************************************************************/ 1288 /* */ 1289 /* lpfc_post_rcv_buf */ 1290 /* This routine post initial rcv buffers to the configured rings */ 1291 /* */ 1292 /************************************************************************/ 1293 static int 1294 lpfc_post_rcv_buf(struct lpfc_hba *phba) 1295 { 1296 struct lpfc_sli *psli = &phba->sli; 1297 1298 /* Ring 0, ELS / CT buffers */ 1299 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0, 1); 1300 /* Ring 2 - FCP no buffers needed */ 1301 1302 return 0; 1303 } 1304 1305 #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 1306 1307 /************************************************************************/ 1308 /* */ 1309 /* lpfc_sha_init */ 1310 /* */ 1311 /************************************************************************/ 1312 static void 1313 lpfc_sha_init(uint32_t * HashResultPointer) 1314 { 1315 HashResultPointer[0] = 0x67452301; 1316 HashResultPointer[1] = 0xEFCDAB89; 1317 HashResultPointer[2] = 0x98BADCFE; 1318 HashResultPointer[3] = 0x10325476; 1319 HashResultPointer[4] = 0xC3D2E1F0; 1320 } 1321 1322 /************************************************************************/ 1323 /* */ 1324 /* lpfc_sha_iterate */ 1325 /* */ 1326 /************************************************************************/ 1327 static void 1328 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) 1329 { 1330 int t; 1331 uint32_t TEMP; 1332 uint32_t A, B, C, D, E; 1333 t = 16; 1334 do { 1335 HashWorkingPointer[t] = 1336 S(1, 1337 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 1338 8] ^ 1339 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]); 1340 } while (++t <= 79); 1341 t = 0; 1342 A = HashResultPointer[0]; 1343 B = HashResultPointer[1]; 1344 C = HashResultPointer[2]; 1345 D = HashResultPointer[3]; 1346 E = HashResultPointer[4]; 1347 1348 do { 1349 if (t < 20) { 1350 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999; 1351 } else if (t < 40) { 1352 TEMP = (B ^ C ^ D) + 0x6ED9EBA1; 1353 } else if (t < 60) { 1354 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC; 1355 } else { 1356 TEMP = (B ^ C ^ D) + 0xCA62C1D6; 1357 } 1358 TEMP += S(5, A) + E + HashWorkingPointer[t]; 1359 E = D; 1360 D = C; 1361 C = S(30, B); 1362 B = A; 1363 A = TEMP; 1364 } while (++t <= 79); 1365 1366 HashResultPointer[0] += A; 1367 HashResultPointer[1] += B; 1368 HashResultPointer[2] += C; 1369 HashResultPointer[3] += D; 1370 HashResultPointer[4] += E; 1371 1372 } 1373 1374 /************************************************************************/ 1375 /* */ 1376 /* lpfc_challenge_key */ 1377 /* */ 1378 /************************************************************************/ 1379 static void 1380 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) 1381 { 1382 *HashWorking = (*RandomChallenge ^ *HashWorking); 1383 } 1384 1385 /************************************************************************/ 1386 /* */ 1387 /* lpfc_hba_init */ 1388 /* */ 1389 /************************************************************************/ 1390 void 1391 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 1392 { 1393 int t; 1394 uint32_t *HashWorking; 1395 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 1396 1397 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 1398 if (!HashWorking) 1399 return; 1400 1401 HashWorking[0] = HashWorking[78] = *pwwnn++; 1402 HashWorking[1] = HashWorking[79] = *pwwnn; 1403 1404 for (t = 0; t < 7; t++) 1405 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 1406 1407 lpfc_sha_init(hbainit); 1408 lpfc_sha_iterate(hbainit, HashWorking); 1409 kfree(HashWorking); 1410 } 1411 1412 void 1413 lpfc_cleanup(struct lpfc_vport *vport) 1414 { 1415 struct lpfc_hba *phba = vport->phba; 1416 struct lpfc_nodelist *ndlp, *next_ndlp; 1417 int i = 0; 1418 1419 if (phba->link_state > LPFC_LINK_DOWN) 1420 lpfc_port_link_failure(vport); 1421 1422 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 1423 if (!NLP_CHK_NODE_ACT(ndlp)) { 1424 ndlp = lpfc_enable_node(vport, ndlp, 1425 NLP_STE_UNUSED_NODE); 1426 if (!ndlp) 1427 continue; 1428 spin_lock_irq(&phba->ndlp_lock); 1429 NLP_SET_FREE_REQ(ndlp); 1430 spin_unlock_irq(&phba->ndlp_lock); 1431 /* Trigger the release of the ndlp memory */ 1432 lpfc_nlp_put(ndlp); 1433 continue; 1434 } 1435 spin_lock_irq(&phba->ndlp_lock); 1436 if (NLP_CHK_FREE_REQ(ndlp)) { 1437 /* The ndlp should not be in memory free mode already */ 1438 spin_unlock_irq(&phba->ndlp_lock); 1439 continue; 1440 } else 1441 /* Indicate request for freeing ndlp memory */ 1442 NLP_SET_FREE_REQ(ndlp); 1443 spin_unlock_irq(&phba->ndlp_lock); 1444 1445 if (vport->port_type != LPFC_PHYSICAL_PORT && 1446 ndlp->nlp_DID == Fabric_DID) { 1447 /* Just free up ndlp with Fabric_DID for vports */ 1448 lpfc_nlp_put(ndlp); 1449 continue; 1450 } 1451 1452 if (ndlp->nlp_type & NLP_FABRIC) 1453 lpfc_disc_state_machine(vport, ndlp, NULL, 1454 NLP_EVT_DEVICE_RECOVERY); 1455 1456 lpfc_disc_state_machine(vport, ndlp, NULL, 1457 NLP_EVT_DEVICE_RM); 1458 } 1459 1460 /* At this point, ALL ndlp's should be gone 1461 * because of the previous NLP_EVT_DEVICE_RM. 1462 * Lets wait for this to happen, if needed. 1463 */ 1464 while (!list_empty(&vport->fc_nodes)) { 1465 1466 if (i++ > 3000) { 1467 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 1468 "0233 Nodelist not empty\n"); 1469 list_for_each_entry_safe(ndlp, next_ndlp, 1470 &vport->fc_nodes, nlp_listp) { 1471 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 1472 LOG_NODE, 1473 "0282: did:x%x ndlp:x%p " 1474 "usgmap:x%x refcnt:%d\n", 1475 ndlp->nlp_DID, (void *)ndlp, 1476 ndlp->nlp_usg_map, 1477 atomic_read( 1478 &ndlp->kref.refcount)); 1479 } 1480 break; 1481 } 1482 1483 /* Wait for any activity on ndlps to settle */ 1484 msleep(10); 1485 } 1486 return; 1487 } 1488 1489 void 1490 lpfc_stop_vport_timers(struct lpfc_vport *vport) 1491 { 1492 del_timer_sync(&vport->els_tmofunc); 1493 del_timer_sync(&vport->fc_fdmitmo); 1494 lpfc_can_disctmo(vport); 1495 return; 1496 } 1497 1498 static void 1499 lpfc_stop_phba_timers(struct lpfc_hba *phba) 1500 { 1501 del_timer_sync(&phba->fcp_poll_timer); 1502 lpfc_stop_vport_timers(phba->pport); 1503 del_timer_sync(&phba->sli.mbox_tmo); 1504 del_timer_sync(&phba->fabric_block_timer); 1505 phba->hb_outstanding = 0; 1506 del_timer_sync(&phba->hb_tmofunc); 1507 return; 1508 } 1509 1510 static void 1511 lpfc_block_mgmt_io(struct lpfc_hba * phba) 1512 { 1513 unsigned long iflag; 1514 1515 spin_lock_irqsave(&phba->hbalock, iflag); 1516 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 1517 spin_unlock_irqrestore(&phba->hbalock, iflag); 1518 } 1519 1520 int 1521 lpfc_online(struct lpfc_hba *phba) 1522 { 1523 struct lpfc_vport *vport = phba->pport; 1524 struct lpfc_vport **vports; 1525 int i; 1526 1527 if (!phba) 1528 return 0; 1529 1530 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 1531 return 0; 1532 1533 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1534 "0458 Bring Adapter online\n"); 1535 1536 lpfc_block_mgmt_io(phba); 1537 1538 if (!lpfc_sli_queue_setup(phba)) { 1539 lpfc_unblock_mgmt_io(phba); 1540 return 1; 1541 } 1542 1543 if (lpfc_sli_hba_setup(phba)) { /* Initialize the HBA */ 1544 lpfc_unblock_mgmt_io(phba); 1545 return 1; 1546 } 1547 1548 vports = lpfc_create_vport_work_array(phba); 1549 if (vports != NULL) 1550 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 1551 struct Scsi_Host *shost; 1552 shost = lpfc_shost_from_vport(vports[i]); 1553 spin_lock_irq(shost->host_lock); 1554 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 1555 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 1556 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 1557 spin_unlock_irq(shost->host_lock); 1558 } 1559 lpfc_destroy_vport_work_array(phba, vports); 1560 1561 lpfc_unblock_mgmt_io(phba); 1562 return 0; 1563 } 1564 1565 void 1566 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 1567 { 1568 unsigned long iflag; 1569 1570 spin_lock_irqsave(&phba->hbalock, iflag); 1571 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 1572 spin_unlock_irqrestore(&phba->hbalock, iflag); 1573 } 1574 1575 void 1576 lpfc_offline_prep(struct lpfc_hba * phba) 1577 { 1578 struct lpfc_vport *vport = phba->pport; 1579 struct lpfc_nodelist *ndlp, *next_ndlp; 1580 struct lpfc_vport **vports; 1581 int i; 1582 1583 if (vport->fc_flag & FC_OFFLINE_MODE) 1584 return; 1585 1586 lpfc_block_mgmt_io(phba); 1587 1588 lpfc_linkdown(phba); 1589 1590 /* Issue an unreg_login to all nodes on all vports */ 1591 vports = lpfc_create_vport_work_array(phba); 1592 if (vports != NULL) { 1593 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 1594 struct Scsi_Host *shost; 1595 1596 if (vports[i]->load_flag & FC_UNLOADING) 1597 continue; 1598 shost = lpfc_shost_from_vport(vports[i]); 1599 list_for_each_entry_safe(ndlp, next_ndlp, 1600 &vports[i]->fc_nodes, 1601 nlp_listp) { 1602 if (!NLP_CHK_NODE_ACT(ndlp)) 1603 continue; 1604 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 1605 continue; 1606 if (ndlp->nlp_type & NLP_FABRIC) { 1607 lpfc_disc_state_machine(vports[i], ndlp, 1608 NULL, NLP_EVT_DEVICE_RECOVERY); 1609 lpfc_disc_state_machine(vports[i], ndlp, 1610 NULL, NLP_EVT_DEVICE_RM); 1611 } 1612 spin_lock_irq(shost->host_lock); 1613 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 1614 spin_unlock_irq(shost->host_lock); 1615 lpfc_unreg_rpi(vports[i], ndlp); 1616 } 1617 } 1618 } 1619 lpfc_destroy_vport_work_array(phba, vports); 1620 1621 lpfc_sli_flush_mbox_queue(phba); 1622 } 1623 1624 void 1625 lpfc_offline(struct lpfc_hba *phba) 1626 { 1627 struct Scsi_Host *shost; 1628 struct lpfc_vport **vports; 1629 int i; 1630 1631 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 1632 return; 1633 1634 /* stop all timers associated with this hba */ 1635 lpfc_stop_phba_timers(phba); 1636 vports = lpfc_create_vport_work_array(phba); 1637 if (vports != NULL) 1638 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) 1639 lpfc_stop_vport_timers(vports[i]); 1640 lpfc_destroy_vport_work_array(phba, vports); 1641 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1642 "0460 Bring Adapter offline\n"); 1643 /* Bring down the SLI Layer and cleanup. The HBA is offline 1644 now. */ 1645 lpfc_sli_hba_down(phba); 1646 spin_lock_irq(&phba->hbalock); 1647 phba->work_ha = 0; 1648 spin_unlock_irq(&phba->hbalock); 1649 vports = lpfc_create_vport_work_array(phba); 1650 if (vports != NULL) 1651 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 1652 shost = lpfc_shost_from_vport(vports[i]); 1653 spin_lock_irq(shost->host_lock); 1654 vports[i]->work_port_events = 0; 1655 vports[i]->fc_flag |= FC_OFFLINE_MODE; 1656 spin_unlock_irq(shost->host_lock); 1657 } 1658 lpfc_destroy_vport_work_array(phba, vports); 1659 } 1660 1661 /****************************************************************************** 1662 * Function name: lpfc_scsi_free 1663 * 1664 * Description: Called from lpfc_pci_remove_one free internal driver resources 1665 * 1666 ******************************************************************************/ 1667 static int 1668 lpfc_scsi_free(struct lpfc_hba *phba) 1669 { 1670 struct lpfc_scsi_buf *sb, *sb_next; 1671 struct lpfc_iocbq *io, *io_next; 1672 1673 spin_lock_irq(&phba->hbalock); 1674 /* Release all the lpfc_scsi_bufs maintained by this host. */ 1675 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 1676 list_del(&sb->list); 1677 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 1678 sb->dma_handle); 1679 kfree(sb); 1680 phba->total_scsi_bufs--; 1681 } 1682 1683 /* Release all the lpfc_iocbq entries maintained by this host. */ 1684 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 1685 list_del(&io->list); 1686 kfree(io); 1687 phba->total_iocbq_bufs--; 1688 } 1689 1690 spin_unlock_irq(&phba->hbalock); 1691 1692 return 0; 1693 } 1694 1695 struct lpfc_vport * 1696 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 1697 { 1698 struct lpfc_vport *vport; 1699 struct Scsi_Host *shost; 1700 int error = 0; 1701 1702 if (dev != &phba->pcidev->dev) 1703 shost = scsi_host_alloc(&lpfc_vport_template, 1704 sizeof(struct lpfc_vport)); 1705 else 1706 shost = scsi_host_alloc(&lpfc_template, 1707 sizeof(struct lpfc_vport)); 1708 if (!shost) 1709 goto out; 1710 1711 vport = (struct lpfc_vport *) shost->hostdata; 1712 vport->phba = phba; 1713 vport->load_flag |= FC_LOADING; 1714 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 1715 vport->fc_rscn_flush = 0; 1716 1717 lpfc_get_vport_cfgparam(vport); 1718 shost->unique_id = instance; 1719 shost->max_id = LPFC_MAX_TARGET; 1720 shost->max_lun = vport->cfg_max_luns; 1721 shost->this_id = -1; 1722 shost->max_cmd_len = 16; 1723 /* 1724 * Set initial can_queue value since 0 is no longer supported and 1725 * scsi_add_host will fail. This will be adjusted later based on the 1726 * max xri value determined in hba setup. 1727 */ 1728 shost->can_queue = phba->cfg_hba_queue_depth - 10; 1729 if (dev != &phba->pcidev->dev) { 1730 shost->transportt = lpfc_vport_transport_template; 1731 vport->port_type = LPFC_NPIV_PORT; 1732 } else { 1733 shost->transportt = lpfc_transport_template; 1734 vport->port_type = LPFC_PHYSICAL_PORT; 1735 } 1736 1737 /* Initialize all internally managed lists. */ 1738 INIT_LIST_HEAD(&vport->fc_nodes); 1739 spin_lock_init(&vport->work_port_lock); 1740 1741 init_timer(&vport->fc_disctmo); 1742 vport->fc_disctmo.function = lpfc_disc_timeout; 1743 vport->fc_disctmo.data = (unsigned long)vport; 1744 1745 init_timer(&vport->fc_fdmitmo); 1746 vport->fc_fdmitmo.function = lpfc_fdmi_tmo; 1747 vport->fc_fdmitmo.data = (unsigned long)vport; 1748 1749 init_timer(&vport->els_tmofunc); 1750 vport->els_tmofunc.function = lpfc_els_timeout; 1751 vport->els_tmofunc.data = (unsigned long)vport; 1752 1753 error = scsi_add_host(shost, dev); 1754 if (error) 1755 goto out_put_shost; 1756 1757 spin_lock_irq(&phba->hbalock); 1758 list_add_tail(&vport->listentry, &phba->port_list); 1759 spin_unlock_irq(&phba->hbalock); 1760 return vport; 1761 1762 out_put_shost: 1763 scsi_host_put(shost); 1764 out: 1765 return NULL; 1766 } 1767 1768 void 1769 destroy_port(struct lpfc_vport *vport) 1770 { 1771 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1772 struct lpfc_hba *phba = vport->phba; 1773 1774 kfree(vport->vname); 1775 1776 lpfc_debugfs_terminate(vport); 1777 fc_remove_host(shost); 1778 scsi_remove_host(shost); 1779 1780 spin_lock_irq(&phba->hbalock); 1781 list_del_init(&vport->listentry); 1782 spin_unlock_irq(&phba->hbalock); 1783 1784 lpfc_cleanup(vport); 1785 return; 1786 } 1787 1788 int 1789 lpfc_get_instance(void) 1790 { 1791 int instance = 0; 1792 1793 /* Assign an unused number */ 1794 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL)) 1795 return -1; 1796 if (idr_get_new(&lpfc_hba_index, NULL, &instance)) 1797 return -1; 1798 return instance; 1799 } 1800 1801 /* 1802 * Note: there is no scan_start function as adapter initialization 1803 * will have asynchronously kicked off the link initialization. 1804 */ 1805 1806 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 1807 { 1808 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1809 struct lpfc_hba *phba = vport->phba; 1810 int stat = 0; 1811 1812 spin_lock_irq(shost->host_lock); 1813 1814 if (vport->load_flag & FC_UNLOADING) { 1815 stat = 1; 1816 goto finished; 1817 } 1818 if (time >= 30 * HZ) { 1819 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1820 "0461 Scanning longer than 30 " 1821 "seconds. Continuing initialization\n"); 1822 stat = 1; 1823 goto finished; 1824 } 1825 if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) { 1826 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1827 "0465 Link down longer than 15 " 1828 "seconds. Continuing initialization\n"); 1829 stat = 1; 1830 goto finished; 1831 } 1832 1833 if (vport->port_state != LPFC_VPORT_READY) 1834 goto finished; 1835 if (vport->num_disc_nodes || vport->fc_prli_sent) 1836 goto finished; 1837 if (vport->fc_map_cnt == 0 && time < 2 * HZ) 1838 goto finished; 1839 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 1840 goto finished; 1841 1842 stat = 1; 1843 1844 finished: 1845 spin_unlock_irq(shost->host_lock); 1846 return stat; 1847 } 1848 1849 void lpfc_host_attrib_init(struct Scsi_Host *shost) 1850 { 1851 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1852 struct lpfc_hba *phba = vport->phba; 1853 /* 1854 * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). 1855 */ 1856 1857 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 1858 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 1859 fc_host_supported_classes(shost) = FC_COS_CLASS3; 1860 1861 memset(fc_host_supported_fc4s(shost), 0, 1862 sizeof(fc_host_supported_fc4s(shost))); 1863 fc_host_supported_fc4s(shost)[2] = 1; 1864 fc_host_supported_fc4s(shost)[7] = 1; 1865 1866 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 1867 sizeof fc_host_symbolic_name(shost)); 1868 1869 fc_host_supported_speeds(shost) = 0; 1870 if (phba->lmt & LMT_10Gb) 1871 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 1872 if (phba->lmt & LMT_8Gb) 1873 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 1874 if (phba->lmt & LMT_4Gb) 1875 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 1876 if (phba->lmt & LMT_2Gb) 1877 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 1878 if (phba->lmt & LMT_1Gb) 1879 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 1880 1881 fc_host_maxframe_size(shost) = 1882 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 1883 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 1884 1885 /* This value is also unchanging */ 1886 memset(fc_host_active_fc4s(shost), 0, 1887 sizeof(fc_host_active_fc4s(shost))); 1888 fc_host_active_fc4s(shost)[2] = 1; 1889 fc_host_active_fc4s(shost)[7] = 1; 1890 1891 fc_host_max_npiv_vports(shost) = phba->max_vpi; 1892 spin_lock_irq(shost->host_lock); 1893 vport->load_flag &= ~FC_LOADING; 1894 spin_unlock_irq(shost->host_lock); 1895 } 1896 1897 static int 1898 lpfc_enable_msix(struct lpfc_hba *phba) 1899 { 1900 int error; 1901 1902 phba->msix_entries[0].entry = 0; 1903 phba->msix_entries[0].vector = 0; 1904 1905 error = pci_enable_msix(phba->pcidev, phba->msix_entries, 1906 ARRAY_SIZE(phba->msix_entries)); 1907 if (error) { 1908 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1909 "0420 Enable MSI-X failed (%d), continuing " 1910 "with MSI\n", error); 1911 pci_disable_msix(phba->pcidev); 1912 return error; 1913 } 1914 1915 error = request_irq(phba->msix_entries[0].vector, lpfc_intr_handler, 0, 1916 LPFC_DRIVER_NAME, phba); 1917 if (error) { 1918 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1919 "0421 MSI-X request_irq failed (%d), " 1920 "continuing with MSI\n", error); 1921 pci_disable_msix(phba->pcidev); 1922 } 1923 return error; 1924 } 1925 1926 static void 1927 lpfc_disable_msix(struct lpfc_hba *phba) 1928 { 1929 free_irq(phba->msix_entries[0].vector, phba); 1930 pci_disable_msix(phba->pcidev); 1931 } 1932 1933 static int __devinit 1934 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 1935 { 1936 struct lpfc_vport *vport = NULL; 1937 struct lpfc_hba *phba; 1938 struct lpfc_sli *psli; 1939 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 1940 struct Scsi_Host *shost = NULL; 1941 void *ptr; 1942 unsigned long bar0map_len, bar2map_len; 1943 int error = -ENODEV, retval; 1944 int i, hbq_count; 1945 uint16_t iotag; 1946 int bars = pci_select_bars(pdev, IORESOURCE_MEM); 1947 1948 if (pci_enable_device_mem(pdev)) 1949 goto out; 1950 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME)) 1951 goto out_disable_device; 1952 1953 phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL); 1954 if (!phba) 1955 goto out_release_regions; 1956 1957 spin_lock_init(&phba->hbalock); 1958 1959 /* Initialize ndlp management spinlock */ 1960 spin_lock_init(&phba->ndlp_lock); 1961 1962 phba->pcidev = pdev; 1963 1964 /* Assign an unused board number */ 1965 if ((phba->brd_no = lpfc_get_instance()) < 0) 1966 goto out_free_phba; 1967 1968 INIT_LIST_HEAD(&phba->port_list); 1969 /* 1970 * Get all the module params for configuring this host and then 1971 * establish the host. 1972 */ 1973 lpfc_get_cfgparam(phba); 1974 phba->max_vpi = LPFC_MAX_VPI; 1975 1976 /* Initialize timers used by driver */ 1977 init_timer(&phba->hb_tmofunc); 1978 phba->hb_tmofunc.function = lpfc_hb_timeout; 1979 phba->hb_tmofunc.data = (unsigned long)phba; 1980 1981 psli = &phba->sli; 1982 init_timer(&psli->mbox_tmo); 1983 psli->mbox_tmo.function = lpfc_mbox_timeout; 1984 psli->mbox_tmo.data = (unsigned long) phba; 1985 init_timer(&phba->fcp_poll_timer); 1986 phba->fcp_poll_timer.function = lpfc_poll_timeout; 1987 phba->fcp_poll_timer.data = (unsigned long) phba; 1988 init_timer(&phba->fabric_block_timer); 1989 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 1990 phba->fabric_block_timer.data = (unsigned long) phba; 1991 1992 pci_set_master(pdev); 1993 pci_try_set_mwi(pdev); 1994 1995 if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0) 1996 if (pci_set_dma_mask(phba->pcidev, DMA_32BIT_MASK) != 0) 1997 goto out_idr_remove; 1998 1999 /* 2000 * Get the bus address of Bar0 and Bar2 and the number of bytes 2001 * required by each mapping. 2002 */ 2003 phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0); 2004 bar0map_len = pci_resource_len(phba->pcidev, 0); 2005 2006 phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2); 2007 bar2map_len = pci_resource_len(phba->pcidev, 2); 2008 2009 /* Map HBA SLIM to a kernel virtual address. */ 2010 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 2011 if (!phba->slim_memmap_p) { 2012 error = -ENODEV; 2013 dev_printk(KERN_ERR, &pdev->dev, 2014 "ioremap failed for SLIM memory.\n"); 2015 goto out_idr_remove; 2016 } 2017 2018 /* Map HBA Control Registers to a kernel virtual address. */ 2019 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 2020 if (!phba->ctrl_regs_memmap_p) { 2021 error = -ENODEV; 2022 dev_printk(KERN_ERR, &pdev->dev, 2023 "ioremap failed for HBA control registers.\n"); 2024 goto out_iounmap_slim; 2025 } 2026 2027 /* Allocate memory for SLI-2 structures */ 2028 phba->slim2p = dma_alloc_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE, 2029 &phba->slim2p_mapping, GFP_KERNEL); 2030 if (!phba->slim2p) 2031 goto out_iounmap; 2032 2033 memset(phba->slim2p, 0, SLI2_SLIM_SIZE); 2034 2035 phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev, 2036 lpfc_sli_hbq_size(), 2037 &phba->hbqslimp.phys, 2038 GFP_KERNEL); 2039 if (!phba->hbqslimp.virt) 2040 goto out_free_slim; 2041 2042 hbq_count = lpfc_sli_hbq_count(); 2043 ptr = phba->hbqslimp.virt; 2044 for (i = 0; i < hbq_count; ++i) { 2045 phba->hbqs[i].hbq_virt = ptr; 2046 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 2047 ptr += (lpfc_hbq_defs[i]->entry_count * 2048 sizeof(struct lpfc_hbq_entry)); 2049 } 2050 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 2051 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 2052 2053 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 2054 2055 INIT_LIST_HEAD(&phba->hbqbuf_in_list); 2056 2057 /* Initialize the SLI Layer to run with lpfc HBAs. */ 2058 lpfc_sli_setup(phba); 2059 lpfc_sli_queue_setup(phba); 2060 2061 retval = lpfc_mem_alloc(phba); 2062 if (retval) { 2063 error = retval; 2064 goto out_free_hbqslimp; 2065 } 2066 2067 /* Initialize and populate the iocb list per host. */ 2068 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 2069 for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) { 2070 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 2071 if (iocbq_entry == NULL) { 2072 printk(KERN_ERR "%s: only allocated %d iocbs of " 2073 "expected %d count. Unloading driver.\n", 2074 __FUNCTION__, i, LPFC_IOCB_LIST_CNT); 2075 error = -ENOMEM; 2076 goto out_free_iocbq; 2077 } 2078 2079 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 2080 if (iotag == 0) { 2081 kfree (iocbq_entry); 2082 printk(KERN_ERR "%s: failed to allocate IOTAG. " 2083 "Unloading driver.\n", 2084 __FUNCTION__); 2085 error = -ENOMEM; 2086 goto out_free_iocbq; 2087 } 2088 2089 spin_lock_irq(&phba->hbalock); 2090 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 2091 phba->total_iocbq_bufs++; 2092 spin_unlock_irq(&phba->hbalock); 2093 } 2094 2095 /* Initialize HBA structure */ 2096 phba->fc_edtov = FF_DEF_EDTOV; 2097 phba->fc_ratov = FF_DEF_RATOV; 2098 phba->fc_altov = FF_DEF_ALTOV; 2099 phba->fc_arbtov = FF_DEF_ARBTOV; 2100 2101 INIT_LIST_HEAD(&phba->work_list); 2102 phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT); 2103 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 2104 2105 /* Startup the kernel thread for this host adapter. */ 2106 phba->worker_thread = kthread_run(lpfc_do_work, phba, 2107 "lpfc_worker_%d", phba->brd_no); 2108 if (IS_ERR(phba->worker_thread)) { 2109 error = PTR_ERR(phba->worker_thread); 2110 goto out_free_iocbq; 2111 } 2112 2113 /* Initialize the list of scsi buffers used by driver for scsi IO. */ 2114 spin_lock_init(&phba->scsi_buf_list_lock); 2115 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); 2116 2117 /* Initialize list of fabric iocbs */ 2118 INIT_LIST_HEAD(&phba->fabric_iocb_list); 2119 2120 /* Initialize list to save ELS buffers */ 2121 INIT_LIST_HEAD(&phba->elsbuf); 2122 2123 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 2124 if (!vport) 2125 goto out_kthread_stop; 2126 2127 shost = lpfc_shost_from_vport(vport); 2128 phba->pport = vport; 2129 lpfc_debugfs_initialize(vport); 2130 2131 pci_set_drvdata(pdev, shost); 2132 phba->intr_type = NONE; 2133 2134 if (phba->cfg_use_msi == 2) { 2135 error = lpfc_enable_msix(phba); 2136 if (!error) 2137 phba->intr_type = MSIX; 2138 } 2139 2140 /* Fallback to MSI if MSI-X initialization failed */ 2141 if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) { 2142 retval = pci_enable_msi(phba->pcidev); 2143 if (!retval) 2144 phba->intr_type = MSI; 2145 else 2146 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2147 "0452 Enable MSI failed, continuing " 2148 "with IRQ\n"); 2149 } 2150 2151 /* MSI-X is the only case the doesn't need to call request_irq */ 2152 if (phba->intr_type != MSIX) { 2153 retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, 2154 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 2155 if (retval) { 2156 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0451 Enable " 2157 "interrupt handler failed\n"); 2158 error = retval; 2159 goto out_disable_msi; 2160 } else if (phba->intr_type != MSI) 2161 phba->intr_type = INTx; 2162 } 2163 2164 phba->MBslimaddr = phba->slim_memmap_p; 2165 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 2166 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 2167 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 2168 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 2169 2170 if (lpfc_alloc_sysfs_attr(vport)) { 2171 error = -ENOMEM; 2172 goto out_free_irq; 2173 } 2174 2175 if (lpfc_sli_hba_setup(phba)) { 2176 error = -ENODEV; 2177 goto out_remove_device; 2178 } 2179 2180 /* 2181 * hba setup may have changed the hba_queue_depth so we need to adjust 2182 * the value of can_queue. 2183 */ 2184 shost->can_queue = phba->cfg_hba_queue_depth - 10; 2185 2186 lpfc_host_attrib_init(shost); 2187 2188 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 2189 spin_lock_irq(shost->host_lock); 2190 lpfc_poll_start_timer(phba); 2191 spin_unlock_irq(shost->host_lock); 2192 } 2193 2194 scsi_scan_host(shost); 2195 2196 return 0; 2197 2198 out_remove_device: 2199 lpfc_free_sysfs_attr(vport); 2200 spin_lock_irq(shost->host_lock); 2201 vport->load_flag |= FC_UNLOADING; 2202 spin_unlock_irq(shost->host_lock); 2203 out_free_irq: 2204 lpfc_stop_phba_timers(phba); 2205 phba->pport->work_port_events = 0; 2206 2207 if (phba->intr_type == MSIX) 2208 lpfc_disable_msix(phba); 2209 else 2210 free_irq(phba->pcidev->irq, phba); 2211 2212 out_disable_msi: 2213 if (phba->intr_type == MSI) 2214 pci_disable_msi(phba->pcidev); 2215 destroy_port(vport); 2216 out_kthread_stop: 2217 kthread_stop(phba->worker_thread); 2218 out_free_iocbq: 2219 list_for_each_entry_safe(iocbq_entry, iocbq_next, 2220 &phba->lpfc_iocb_list, list) { 2221 kfree(iocbq_entry); 2222 phba->total_iocbq_bufs--; 2223 } 2224 lpfc_mem_free(phba); 2225 out_free_hbqslimp: 2226 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt, 2227 phba->hbqslimp.phys); 2228 out_free_slim: 2229 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p, 2230 phba->slim2p_mapping); 2231 out_iounmap: 2232 iounmap(phba->ctrl_regs_memmap_p); 2233 out_iounmap_slim: 2234 iounmap(phba->slim_memmap_p); 2235 out_idr_remove: 2236 idr_remove(&lpfc_hba_index, phba->brd_no); 2237 out_free_phba: 2238 kfree(phba); 2239 out_release_regions: 2240 pci_release_selected_regions(pdev, bars); 2241 out_disable_device: 2242 pci_disable_device(pdev); 2243 out: 2244 pci_set_drvdata(pdev, NULL); 2245 if (shost) 2246 scsi_host_put(shost); 2247 return error; 2248 } 2249 2250 static void __devexit 2251 lpfc_pci_remove_one(struct pci_dev *pdev) 2252 { 2253 struct Scsi_Host *shost = pci_get_drvdata(pdev); 2254 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2255 struct lpfc_hba *phba = vport->phba; 2256 int bars = pci_select_bars(pdev, IORESOURCE_MEM); 2257 2258 spin_lock_irq(&phba->hbalock); 2259 vport->load_flag |= FC_UNLOADING; 2260 spin_unlock_irq(&phba->hbalock); 2261 2262 kfree(vport->vname); 2263 lpfc_free_sysfs_attr(vport); 2264 2265 kthread_stop(phba->worker_thread); 2266 2267 fc_remove_host(shost); 2268 scsi_remove_host(shost); 2269 lpfc_cleanup(vport); 2270 2271 /* 2272 * Bring down the SLI Layer. This step disable all interrupts, 2273 * clears the rings, discards all mailbox commands, and resets 2274 * the HBA. 2275 */ 2276 lpfc_sli_hba_down(phba); 2277 lpfc_sli_brdrestart(phba); 2278 2279 lpfc_stop_phba_timers(phba); 2280 spin_lock_irq(&phba->hbalock); 2281 list_del_init(&vport->listentry); 2282 spin_unlock_irq(&phba->hbalock); 2283 2284 lpfc_debugfs_terminate(vport); 2285 2286 if (phba->intr_type == MSIX) 2287 lpfc_disable_msix(phba); 2288 else { 2289 free_irq(phba->pcidev->irq, phba); 2290 if (phba->intr_type == MSI) 2291 pci_disable_msi(phba->pcidev); 2292 } 2293 2294 pci_set_drvdata(pdev, NULL); 2295 scsi_host_put(shost); 2296 2297 /* 2298 * Call scsi_free before mem_free since scsi bufs are released to their 2299 * corresponding pools here. 2300 */ 2301 lpfc_scsi_free(phba); 2302 lpfc_mem_free(phba); 2303 2304 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt, 2305 phba->hbqslimp.phys); 2306 2307 /* Free resources associated with SLI2 interface */ 2308 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 2309 phba->slim2p, phba->slim2p_mapping); 2310 2311 /* unmap adapter SLIM and Control Registers */ 2312 iounmap(phba->ctrl_regs_memmap_p); 2313 iounmap(phba->slim_memmap_p); 2314 2315 idr_remove(&lpfc_hba_index, phba->brd_no); 2316 2317 kfree(phba); 2318 2319 pci_release_selected_regions(pdev, bars); 2320 pci_disable_device(pdev); 2321 } 2322 2323 /** 2324 * lpfc_io_error_detected - called when PCI error is detected 2325 * @pdev: Pointer to PCI device 2326 * @state: The current pci conneection state 2327 * 2328 * This function is called after a PCI bus error affecting 2329 * this device has been detected. 2330 */ 2331 static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev, 2332 pci_channel_state_t state) 2333 { 2334 struct Scsi_Host *shost = pci_get_drvdata(pdev); 2335 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 2336 struct lpfc_sli *psli = &phba->sli; 2337 struct lpfc_sli_ring *pring; 2338 2339 if (state == pci_channel_io_perm_failure) 2340 return PCI_ERS_RESULT_DISCONNECT; 2341 2342 pci_disable_device(pdev); 2343 /* 2344 * There may be I/Os dropped by the firmware. 2345 * Error iocb (I/O) on txcmplq and let the SCSI layer 2346 * retry it after re-establishing link. 2347 */ 2348 pring = &psli->ring[psli->fcp_ring]; 2349 lpfc_sli_abort_iocb_ring(phba, pring); 2350 2351 if (phba->intr_type == MSIX) 2352 lpfc_disable_msix(phba); 2353 else { 2354 free_irq(phba->pcidev->irq, phba); 2355 if (phba->intr_type == MSI) 2356 pci_disable_msi(phba->pcidev); 2357 } 2358 2359 /* Request a slot reset. */ 2360 return PCI_ERS_RESULT_NEED_RESET; 2361 } 2362 2363 /** 2364 * lpfc_io_slot_reset - called after the pci bus has been reset. 2365 * @pdev: Pointer to PCI device 2366 * 2367 * Restart the card from scratch, as if from a cold-boot. 2368 */ 2369 static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) 2370 { 2371 struct Scsi_Host *shost = pci_get_drvdata(pdev); 2372 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 2373 struct lpfc_sli *psli = &phba->sli; 2374 2375 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 2376 if (pci_enable_device_mem(pdev)) { 2377 printk(KERN_ERR "lpfc: Cannot re-enable " 2378 "PCI device after reset.\n"); 2379 return PCI_ERS_RESULT_DISCONNECT; 2380 } 2381 2382 pci_set_master(pdev); 2383 2384 spin_lock_irq(&phba->hbalock); 2385 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 2386 spin_unlock_irq(&phba->hbalock); 2387 2388 2389 /* Take device offline; this will perform cleanup */ 2390 lpfc_offline(phba); 2391 lpfc_sli_brdrestart(phba); 2392 2393 return PCI_ERS_RESULT_RECOVERED; 2394 } 2395 2396 /** 2397 * lpfc_io_resume - called when traffic can start flowing again. 2398 * @pdev: Pointer to PCI device 2399 * 2400 * This callback is called when the error recovery driver tells us that 2401 * its OK to resume normal operation. 2402 */ 2403 static void lpfc_io_resume(struct pci_dev *pdev) 2404 { 2405 struct Scsi_Host *shost = pci_get_drvdata(pdev); 2406 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 2407 2408 lpfc_online(phba); 2409 } 2410 2411 static struct pci_device_id lpfc_id_table[] = { 2412 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, 2413 PCI_ANY_ID, PCI_ANY_ID, }, 2414 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY, 2415 PCI_ANY_ID, PCI_ANY_ID, }, 2416 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR, 2417 PCI_ANY_ID, PCI_ANY_ID, }, 2418 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS, 2419 PCI_ANY_ID, PCI_ANY_ID, }, 2420 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR, 2421 PCI_ANY_ID, PCI_ANY_ID, }, 2422 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY, 2423 PCI_ANY_ID, PCI_ANY_ID, }, 2424 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY, 2425 PCI_ANY_ID, PCI_ANY_ID, }, 2426 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY, 2427 PCI_ANY_ID, PCI_ANY_ID, }, 2428 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY, 2429 PCI_ANY_ID, PCI_ANY_ID, }, 2430 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE, 2431 PCI_ANY_ID, PCI_ANY_ID, }, 2432 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP, 2433 PCI_ANY_ID, PCI_ANY_ID, }, 2434 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP, 2435 PCI_ANY_ID, PCI_ANY_ID, }, 2436 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS, 2437 PCI_ANY_ID, PCI_ANY_ID, }, 2438 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP, 2439 PCI_ANY_ID, PCI_ANY_ID, }, 2440 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP, 2441 PCI_ANY_ID, PCI_ANY_ID, }, 2442 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID, 2443 PCI_ANY_ID, PCI_ANY_ID, }, 2444 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB, 2445 PCI_ANY_ID, PCI_ANY_ID, }, 2446 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR, 2447 PCI_ANY_ID, PCI_ANY_ID, }, 2448 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP, 2449 PCI_ANY_ID, PCI_ANY_ID, }, 2450 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP, 2451 PCI_ANY_ID, PCI_ANY_ID, }, 2452 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID, 2453 PCI_ANY_ID, PCI_ANY_ID, }, 2454 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB, 2455 PCI_ANY_ID, PCI_ANY_ID, }, 2456 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY, 2457 PCI_ANY_ID, PCI_ANY_ID, }, 2458 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101, 2459 PCI_ANY_ID, PCI_ANY_ID, }, 2460 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S, 2461 PCI_ANY_ID, PCI_ANY_ID, }, 2462 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S, 2463 PCI_ANY_ID, PCI_ANY_ID, }, 2464 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S, 2465 PCI_ANY_ID, PCI_ANY_ID, }, 2466 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT, 2467 PCI_ANY_ID, PCI_ANY_ID, }, 2468 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID, 2469 PCI_ANY_ID, PCI_ANY_ID, }, 2470 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB, 2471 PCI_ANY_ID, PCI_ANY_ID, }, 2472 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP, 2473 PCI_ANY_ID, PCI_ANY_ID, }, 2474 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP, 2475 PCI_ANY_ID, PCI_ANY_ID, }, 2476 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S, 2477 PCI_ANY_ID, PCI_ANY_ID, }, 2478 { 0 } 2479 }; 2480 2481 MODULE_DEVICE_TABLE(pci, lpfc_id_table); 2482 2483 static struct pci_error_handlers lpfc_err_handler = { 2484 .error_detected = lpfc_io_error_detected, 2485 .slot_reset = lpfc_io_slot_reset, 2486 .resume = lpfc_io_resume, 2487 }; 2488 2489 static struct pci_driver lpfc_driver = { 2490 .name = LPFC_DRIVER_NAME, 2491 .id_table = lpfc_id_table, 2492 .probe = lpfc_pci_probe_one, 2493 .remove = __devexit_p(lpfc_pci_remove_one), 2494 .err_handler = &lpfc_err_handler, 2495 }; 2496 2497 static int __init 2498 lpfc_init(void) 2499 { 2500 int error = 0; 2501 2502 printk(LPFC_MODULE_DESC "\n"); 2503 printk(LPFC_COPYRIGHT "\n"); 2504 2505 if (lpfc_enable_npiv) { 2506 lpfc_transport_functions.vport_create = lpfc_vport_create; 2507 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 2508 } 2509 lpfc_transport_template = 2510 fc_attach_transport(&lpfc_transport_functions); 2511 if (lpfc_transport_template == NULL) 2512 return -ENOMEM; 2513 if (lpfc_enable_npiv) { 2514 lpfc_vport_transport_template = 2515 fc_attach_transport(&lpfc_vport_transport_functions); 2516 if (lpfc_vport_transport_template == NULL) { 2517 fc_release_transport(lpfc_transport_template); 2518 return -ENOMEM; 2519 } 2520 } 2521 error = pci_register_driver(&lpfc_driver); 2522 if (error) { 2523 fc_release_transport(lpfc_transport_template); 2524 fc_release_transport(lpfc_vport_transport_template); 2525 } 2526 2527 return error; 2528 } 2529 2530 static void __exit 2531 lpfc_exit(void) 2532 { 2533 pci_unregister_driver(&lpfc_driver); 2534 fc_release_transport(lpfc_transport_template); 2535 if (lpfc_enable_npiv) 2536 fc_release_transport(lpfc_vport_transport_template); 2537 } 2538 2539 module_init(lpfc_init); 2540 module_exit(lpfc_exit); 2541 MODULE_LICENSE("GPL"); 2542 MODULE_DESCRIPTION(LPFC_MODULE_DESC); 2543 MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com"); 2544 MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 2545