1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2004-2008 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 8 * * 9 * This program is free software; you can redistribute it and/or * 10 * modify it under the terms of version 2 of the GNU General * 11 * Public License as published by the Free Software Foundation. * 12 * This program is distributed in the hope that it will be useful. * 13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 17 * TO BE LEGALLY INVALID. See the GNU General Public License for * 18 * more details, a copy of which can be found in the file COPYING * 19 * included with this package. * 20 *******************************************************************/ 21 22 #include <linux/blkdev.h> 23 #include <linux/delay.h> 24 #include <linux/dma-mapping.h> 25 #include <linux/idr.h> 26 #include <linux/interrupt.h> 27 #include <linux/kthread.h> 28 #include <linux/pci.h> 29 #include <linux/spinlock.h> 30 #include <linux/ctype.h> 31 32 #include <scsi/scsi.h> 33 #include <scsi/scsi_device.h> 34 #include <scsi/scsi_host.h> 35 #include <scsi/scsi_transport_fc.h> 36 37 #include "lpfc_hw.h" 38 #include "lpfc_sli.h" 39 #include "lpfc_disc.h" 40 #include "lpfc_scsi.h" 41 #include "lpfc.h" 42 #include "lpfc_logmsg.h" 43 #include "lpfc_crtn.h" 44 #include "lpfc_vport.h" 45 #include "lpfc_version.h" 46 47 static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); 48 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 49 static int lpfc_post_rcv_buf(struct lpfc_hba *); 50 51 static struct scsi_transport_template *lpfc_transport_template = NULL; 52 static struct scsi_transport_template *lpfc_vport_transport_template = NULL; 53 static DEFINE_IDR(lpfc_hba_index); 54 55 /************************************************************************/ 56 /* */ 57 /* lpfc_config_port_prep */ 58 /* This routine will do LPFC initialization prior to the */ 59 /* CONFIG_PORT mailbox command. This will be initialized */ 60 /* as a SLI layer callback routine. */ 61 /* This routine returns 0 on success or -ERESTART if it wants */ 62 /* the SLI layer to reset the HBA and try again. Any */ 63 /* other return value indicates an error. */ 64 /* */ 65 /************************************************************************/ 66 int 67 lpfc_config_port_prep(struct lpfc_hba *phba) 68 { 69 lpfc_vpd_t *vp = &phba->vpd; 70 int i = 0, rc; 71 LPFC_MBOXQ_t *pmb; 72 MAILBOX_t *mb; 73 char *lpfc_vpd_data = NULL; 74 uint16_t offset = 0; 75 static char licensed[56] = 76 "key unlock for use with gnu public licensed code only\0"; 77 static int init_key = 1; 78 79 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 80 if (!pmb) { 81 phba->link_state = LPFC_HBA_ERROR; 82 return -ENOMEM; 83 } 84 85 mb = &pmb->mb; 86 phba->link_state = LPFC_INIT_MBX_CMDS; 87 88 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 89 if (init_key) { 90 uint32_t *ptext = (uint32_t *) licensed; 91 92 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 93 *ptext = cpu_to_be32(*ptext); 94 init_key = 0; 95 } 96 97 lpfc_read_nv(phba, pmb); 98 memset((char*)mb->un.varRDnvp.rsvd3, 0, 99 sizeof (mb->un.varRDnvp.rsvd3)); 100 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, 101 sizeof (licensed)); 102 103 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 104 105 if (rc != MBX_SUCCESS) { 106 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 107 "0324 Config Port initialization " 108 "error, mbxCmd x%x READ_NVPARM, " 109 "mbxStatus x%x\n", 110 mb->mbxCommand, mb->mbxStatus); 111 mempool_free(pmb, phba->mbox_mem_pool); 112 return -ERESTART; 113 } 114 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, 115 sizeof(phba->wwnn)); 116 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, 117 sizeof(phba->wwpn)); 118 } 119 120 phba->sli3_options = 0x0; 121 122 /* Setup and issue mailbox READ REV command */ 123 lpfc_read_rev(phba, pmb); 124 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 125 if (rc != MBX_SUCCESS) { 126 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 127 "0439 Adapter failed to init, mbxCmd x%x " 128 "READ_REV, mbxStatus x%x\n", 129 mb->mbxCommand, mb->mbxStatus); 130 mempool_free( pmb, phba->mbox_mem_pool); 131 return -ERESTART; 132 } 133 134 135 /* 136 * The value of rr must be 1 since the driver set the cv field to 1. 137 * This setting requires the FW to set all revision fields. 138 */ 139 if (mb->un.varRdRev.rr == 0) { 140 vp->rev.rBit = 0; 141 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 142 "0440 Adapter failed to init, READ_REV has " 143 "missing revision information.\n"); 144 mempool_free(pmb, phba->mbox_mem_pool); 145 return -ERESTART; 146 } 147 148 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) 149 return -EINVAL; 150 151 /* Save information as VPD data */ 152 vp->rev.rBit = 1; 153 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t)); 154 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev; 155 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16); 156 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev; 157 memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16); 158 vp->rev.biuRev = mb->un.varRdRev.biuRev; 159 vp->rev.smRev = mb->un.varRdRev.smRev; 160 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev; 161 vp->rev.endecRev = mb->un.varRdRev.endecRev; 162 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh; 163 vp->rev.fcphLow = mb->un.varRdRev.fcphLow; 164 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh; 165 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow; 166 vp->rev.postKernRev = mb->un.varRdRev.postKernRev; 167 vp->rev.opFwRev = mb->un.varRdRev.opFwRev; 168 169 /* If the sli feature level is less then 9, we must 170 * tear down all RPIs and VPIs on link down if NPIV 171 * is enabled. 172 */ 173 if (vp->rev.feaLevelHigh < 9) 174 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN; 175 176 if (lpfc_is_LC_HBA(phba->pcidev->device)) 177 memcpy(phba->RandomData, (char *)&mb->un.varWords[24], 178 sizeof (phba->RandomData)); 179 180 /* Get adapter VPD information */ 181 pmb->context2 = kmalloc(DMP_RSP_SIZE, GFP_KERNEL); 182 if (!pmb->context2) 183 goto out_free_mbox; 184 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL); 185 if (!lpfc_vpd_data) 186 goto out_free_context2; 187 188 do { 189 lpfc_dump_mem(phba, pmb, offset); 190 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 191 192 if (rc != MBX_SUCCESS) { 193 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 194 "0441 VPD not present on adapter, " 195 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n", 196 mb->mbxCommand, mb->mbxStatus); 197 mb->un.varDmp.word_cnt = 0; 198 } 199 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) 200 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; 201 lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset, 202 mb->un.varDmp.word_cnt); 203 offset += mb->un.varDmp.word_cnt; 204 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); 205 lpfc_parse_vpd(phba, lpfc_vpd_data, offset); 206 207 kfree(lpfc_vpd_data); 208 out_free_context2: 209 kfree(pmb->context2); 210 out_free_mbox: 211 mempool_free(pmb, phba->mbox_mem_pool); 212 return 0; 213 } 214 215 /* Completion handler for config async event mailbox command. */ 216 static void 217 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 218 { 219 if (pmboxq->mb.mbxStatus == MBX_SUCCESS) 220 phba->temp_sensor_support = 1; 221 else 222 phba->temp_sensor_support = 0; 223 mempool_free(pmboxq, phba->mbox_mem_pool); 224 return; 225 } 226 227 /************************************************************************/ 228 /* */ 229 /* lpfc_config_port_post */ 230 /* This routine will do LPFC initialization after the */ 231 /* CONFIG_PORT mailbox command. This will be initialized */ 232 /* as a SLI layer callback routine. */ 233 /* This routine returns 0 on success. Any other return value */ 234 /* indicates an error. */ 235 /* */ 236 /************************************************************************/ 237 int 238 lpfc_config_port_post(struct lpfc_hba *phba) 239 { 240 struct lpfc_vport *vport = phba->pport; 241 LPFC_MBOXQ_t *pmb; 242 MAILBOX_t *mb; 243 struct lpfc_dmabuf *mp; 244 struct lpfc_sli *psli = &phba->sli; 245 uint32_t status, timeout; 246 int i, j; 247 int rc; 248 249 spin_lock_irq(&phba->hbalock); 250 /* 251 * If the Config port completed correctly the HBA is not 252 * over heated any more. 253 */ 254 if (phba->over_temp_state == HBA_OVER_TEMP) 255 phba->over_temp_state = HBA_NORMAL_TEMP; 256 spin_unlock_irq(&phba->hbalock); 257 258 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 259 if (!pmb) { 260 phba->link_state = LPFC_HBA_ERROR; 261 return -ENOMEM; 262 } 263 mb = &pmb->mb; 264 265 /* Get login parameters for NID. */ 266 lpfc_read_sparam(phba, pmb, 0); 267 pmb->vport = vport; 268 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 269 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 270 "0448 Adapter failed init, mbxCmd x%x " 271 "READ_SPARM mbxStatus x%x\n", 272 mb->mbxCommand, mb->mbxStatus); 273 phba->link_state = LPFC_HBA_ERROR; 274 mp = (struct lpfc_dmabuf *) pmb->context1; 275 mempool_free( pmb, phba->mbox_mem_pool); 276 lpfc_mbuf_free(phba, mp->virt, mp->phys); 277 kfree(mp); 278 return -EIO; 279 } 280 281 mp = (struct lpfc_dmabuf *) pmb->context1; 282 283 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); 284 lpfc_mbuf_free(phba, mp->virt, mp->phys); 285 kfree(mp); 286 pmb->context1 = NULL; 287 288 if (phba->cfg_soft_wwnn) 289 u64_to_wwn(phba->cfg_soft_wwnn, 290 vport->fc_sparam.nodeName.u.wwn); 291 if (phba->cfg_soft_wwpn) 292 u64_to_wwn(phba->cfg_soft_wwpn, 293 vport->fc_sparam.portName.u.wwn); 294 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 295 sizeof (struct lpfc_name)); 296 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 297 sizeof (struct lpfc_name)); 298 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 299 /* This should be consolidated into parse_vpd ? - mr */ 300 if (phba->SerialNumber[0] == 0) { 301 uint8_t *outptr; 302 303 outptr = &vport->fc_nodename.u.s.IEEE[0]; 304 for (i = 0; i < 12; i++) { 305 status = *outptr++; 306 j = ((status & 0xf0) >> 4); 307 if (j <= 9) 308 phba->SerialNumber[i] = 309 (char)((uint8_t) 0x30 + (uint8_t) j); 310 else 311 phba->SerialNumber[i] = 312 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 313 i++; 314 j = (status & 0xf); 315 if (j <= 9) 316 phba->SerialNumber[i] = 317 (char)((uint8_t) 0x30 + (uint8_t) j); 318 else 319 phba->SerialNumber[i] = 320 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 321 } 322 } 323 324 lpfc_read_config(phba, pmb); 325 pmb->vport = vport; 326 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 327 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 328 "0453 Adapter failed to init, mbxCmd x%x " 329 "READ_CONFIG, mbxStatus x%x\n", 330 mb->mbxCommand, mb->mbxStatus); 331 phba->link_state = LPFC_HBA_ERROR; 332 mempool_free( pmb, phba->mbox_mem_pool); 333 return -EIO; 334 } 335 336 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 337 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1)) 338 phba->cfg_hba_queue_depth = 339 mb->un.varRdConfig.max_xri + 1; 340 341 phba->lmt = mb->un.varRdConfig.lmt; 342 343 /* Get the default values for Model Name and Description */ 344 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 345 346 if ((phba->cfg_link_speed > LINK_SPEED_10G) 347 || ((phba->cfg_link_speed == LINK_SPEED_1G) 348 && !(phba->lmt & LMT_1Gb)) 349 || ((phba->cfg_link_speed == LINK_SPEED_2G) 350 && !(phba->lmt & LMT_2Gb)) 351 || ((phba->cfg_link_speed == LINK_SPEED_4G) 352 && !(phba->lmt & LMT_4Gb)) 353 || ((phba->cfg_link_speed == LINK_SPEED_8G) 354 && !(phba->lmt & LMT_8Gb)) 355 || ((phba->cfg_link_speed == LINK_SPEED_10G) 356 && !(phba->lmt & LMT_10Gb))) { 357 /* Reset link speed to auto */ 358 lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT, 359 "1302 Invalid speed for this board: " 360 "Reset link speed to auto: x%x\n", 361 phba->cfg_link_speed); 362 phba->cfg_link_speed = LINK_SPEED_AUTO; 363 } 364 365 phba->link_state = LPFC_LINK_DOWN; 366 367 /* Only process IOCBs on ELS ring till hba_state is READY */ 368 if (psli->ring[psli->extra_ring].cmdringaddr) 369 psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT; 370 if (psli->ring[psli->fcp_ring].cmdringaddr) 371 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT; 372 if (psli->ring[psli->next_ring].cmdringaddr) 373 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT; 374 375 /* Post receive buffers for desired rings */ 376 if (phba->sli_rev != 3) 377 lpfc_post_rcv_buf(phba); 378 379 /* Enable appropriate host interrupts */ 380 spin_lock_irq(&phba->hbalock); 381 status = readl(phba->HCregaddr); 382 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 383 if (psli->num_rings > 0) 384 status |= HC_R0INT_ENA; 385 if (psli->num_rings > 1) 386 status |= HC_R1INT_ENA; 387 if (psli->num_rings > 2) 388 status |= HC_R2INT_ENA; 389 if (psli->num_rings > 3) 390 status |= HC_R3INT_ENA; 391 392 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 393 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 394 status &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 395 396 writel(status, phba->HCregaddr); 397 readl(phba->HCregaddr); /* flush */ 398 spin_unlock_irq(&phba->hbalock); 399 400 /* 401 * Setup the ring 0 (els) timeout handler 402 */ 403 timeout = phba->fc_ratov << 1; 404 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); 405 mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 406 phba->hb_outstanding = 0; 407 phba->last_completion_time = jiffies; 408 409 lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed); 410 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 411 pmb->vport = vport; 412 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 413 lpfc_set_loopback_flag(phba); 414 if (rc != MBX_SUCCESS) { 415 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 416 "0454 Adapter failed to init, mbxCmd x%x " 417 "INIT_LINK, mbxStatus x%x\n", 418 mb->mbxCommand, mb->mbxStatus); 419 420 /* Clear all interrupt enable conditions */ 421 writel(0, phba->HCregaddr); 422 readl(phba->HCregaddr); /* flush */ 423 /* Clear all pending interrupts */ 424 writel(0xffffffff, phba->HAregaddr); 425 readl(phba->HAregaddr); /* flush */ 426 427 phba->link_state = LPFC_HBA_ERROR; 428 if (rc != MBX_BUSY) 429 mempool_free(pmb, phba->mbox_mem_pool); 430 return -EIO; 431 } 432 /* MBOX buffer will be freed in mbox compl */ 433 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 434 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 435 pmb->mbox_cmpl = lpfc_config_async_cmpl; 436 pmb->vport = phba->pport; 437 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 438 439 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 440 lpfc_printf_log(phba, 441 KERN_ERR, 442 LOG_INIT, 443 "0456 Adapter failed to issue " 444 "ASYNCEVT_ENABLE mbox status x%x \n.", 445 rc); 446 mempool_free(pmb, phba->mbox_mem_pool); 447 } 448 return (0); 449 } 450 451 /************************************************************************/ 452 /* */ 453 /* lpfc_hba_down_prep */ 454 /* This routine will do LPFC uninitialization before the */ 455 /* HBA is reset when bringing down the SLI Layer. This will be */ 456 /* initialized as a SLI layer callback routine. */ 457 /* This routine returns 0 on success. Any other return value */ 458 /* indicates an error. */ 459 /* */ 460 /************************************************************************/ 461 int 462 lpfc_hba_down_prep(struct lpfc_hba *phba) 463 { 464 struct lpfc_vport **vports; 465 int i; 466 /* Disable interrupts */ 467 writel(0, phba->HCregaddr); 468 readl(phba->HCregaddr); /* flush */ 469 470 if (phba->pport->load_flag & FC_UNLOADING) 471 lpfc_cleanup_discovery_resources(phba->pport); 472 else { 473 vports = lpfc_create_vport_work_array(phba); 474 if (vports != NULL) 475 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) 476 lpfc_cleanup_discovery_resources(vports[i]); 477 lpfc_destroy_vport_work_array(phba, vports); 478 } 479 return 0; 480 } 481 482 /************************************************************************/ 483 /* */ 484 /* lpfc_hba_down_post */ 485 /* This routine will do uninitialization after the HBA is reset */ 486 /* when bringing down the SLI Layer. */ 487 /* This routine returns 0 on success. Any other return value */ 488 /* indicates an error. */ 489 /* */ 490 /************************************************************************/ 491 int 492 lpfc_hba_down_post(struct lpfc_hba *phba) 493 { 494 struct lpfc_sli *psli = &phba->sli; 495 struct lpfc_sli_ring *pring; 496 struct lpfc_dmabuf *mp, *next_mp; 497 struct lpfc_iocbq *iocb; 498 IOCB_t *cmd = NULL; 499 LIST_HEAD(completions); 500 int i; 501 502 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) 503 lpfc_sli_hbqbuf_free_all(phba); 504 else { 505 /* Cleanup preposted buffers on the ELS ring */ 506 pring = &psli->ring[LPFC_ELS_RING]; 507 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 508 list_del(&mp->list); 509 pring->postbufq_cnt--; 510 lpfc_mbuf_free(phba, mp->virt, mp->phys); 511 kfree(mp); 512 } 513 } 514 515 spin_lock_irq(&phba->hbalock); 516 for (i = 0; i < psli->num_rings; i++) { 517 pring = &psli->ring[i]; 518 519 /* At this point in time the HBA is either reset or DOA. Either 520 * way, nothing should be on txcmplq as it will NEVER complete. 521 */ 522 list_splice_init(&pring->txcmplq, &completions); 523 pring->txcmplq_cnt = 0; 524 spin_unlock_irq(&phba->hbalock); 525 526 while (!list_empty(&completions)) { 527 iocb = list_get_first(&completions, struct lpfc_iocbq, 528 list); 529 cmd = &iocb->iocb; 530 list_del_init(&iocb->list); 531 532 if (!iocb->iocb_cmpl) 533 lpfc_sli_release_iocbq(phba, iocb); 534 else { 535 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 536 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 537 (iocb->iocb_cmpl) (phba, iocb, iocb); 538 } 539 } 540 541 lpfc_sli_abort_iocb_ring(phba, pring); 542 spin_lock_irq(&phba->hbalock); 543 } 544 spin_unlock_irq(&phba->hbalock); 545 546 return 0; 547 } 548 549 /* HBA heart beat timeout handler */ 550 static void 551 lpfc_hb_timeout(unsigned long ptr) 552 { 553 struct lpfc_hba *phba; 554 uint32_t tmo_posted; 555 unsigned long iflag; 556 557 phba = (struct lpfc_hba *)ptr; 558 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 559 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; 560 if (!tmo_posted) 561 phba->pport->work_port_events |= WORKER_HB_TMO; 562 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 563 564 if (!tmo_posted) 565 lpfc_worker_wake_up(phba); 566 return; 567 } 568 569 static void 570 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 571 { 572 unsigned long drvr_flag; 573 574 spin_lock_irqsave(&phba->hbalock, drvr_flag); 575 phba->hb_outstanding = 0; 576 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 577 578 mempool_free(pmboxq, phba->mbox_mem_pool); 579 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && 580 !(phba->link_state == LPFC_HBA_ERROR) && 581 !(phba->pport->load_flag & FC_UNLOADING)) 582 mod_timer(&phba->hb_tmofunc, 583 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 584 return; 585 } 586 587 void 588 lpfc_hb_timeout_handler(struct lpfc_hba *phba) 589 { 590 LPFC_MBOXQ_t *pmboxq; 591 struct lpfc_dmabuf *buf_ptr; 592 int retval; 593 struct lpfc_sli *psli = &phba->sli; 594 LIST_HEAD(completions); 595 596 if ((phba->link_state == LPFC_HBA_ERROR) || 597 (phba->pport->load_flag & FC_UNLOADING) || 598 (phba->pport->fc_flag & FC_OFFLINE_MODE)) 599 return; 600 601 spin_lock_irq(&phba->pport->work_port_lock); 602 /* If the timer is already canceled do nothing */ 603 if (!(phba->pport->work_port_events & WORKER_HB_TMO)) { 604 spin_unlock_irq(&phba->pport->work_port_lock); 605 return; 606 } 607 608 if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ, 609 jiffies)) { 610 spin_unlock_irq(&phba->pport->work_port_lock); 611 if (!phba->hb_outstanding) 612 mod_timer(&phba->hb_tmofunc, 613 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 614 else 615 mod_timer(&phba->hb_tmofunc, 616 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); 617 return; 618 } 619 spin_unlock_irq(&phba->pport->work_port_lock); 620 621 if (phba->elsbuf_cnt && 622 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { 623 spin_lock_irq(&phba->hbalock); 624 list_splice_init(&phba->elsbuf, &completions); 625 phba->elsbuf_cnt = 0; 626 phba->elsbuf_prev_cnt = 0; 627 spin_unlock_irq(&phba->hbalock); 628 629 while (!list_empty(&completions)) { 630 list_remove_head(&completions, buf_ptr, 631 struct lpfc_dmabuf, list); 632 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 633 kfree(buf_ptr); 634 } 635 } 636 phba->elsbuf_prev_cnt = phba->elsbuf_cnt; 637 638 /* If there is no heart beat outstanding, issue a heartbeat command */ 639 if (phba->cfg_enable_hba_heartbeat) { 640 if (!phba->hb_outstanding) { 641 pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL); 642 if (!pmboxq) { 643 mod_timer(&phba->hb_tmofunc, 644 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 645 return; 646 } 647 648 lpfc_heart_beat(phba, pmboxq); 649 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; 650 pmboxq->vport = phba->pport; 651 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 652 653 if (retval != MBX_BUSY && retval != MBX_SUCCESS) { 654 mempool_free(pmboxq, phba->mbox_mem_pool); 655 mod_timer(&phba->hb_tmofunc, 656 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 657 return; 658 } 659 mod_timer(&phba->hb_tmofunc, 660 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); 661 phba->hb_outstanding = 1; 662 return; 663 } else { 664 /* 665 * If heart beat timeout called with hb_outstanding set 666 * we need to take the HBA offline. 667 */ 668 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 669 "0459 Adapter heartbeat failure, " 670 "taking this port offline.\n"); 671 672 spin_lock_irq(&phba->hbalock); 673 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 674 spin_unlock_irq(&phba->hbalock); 675 676 lpfc_offline_prep(phba); 677 lpfc_offline(phba); 678 lpfc_unblock_mgmt_io(phba); 679 phba->link_state = LPFC_HBA_ERROR; 680 lpfc_hba_down_post(phba); 681 } 682 } 683 } 684 685 static void 686 lpfc_offline_eratt(struct lpfc_hba *phba) 687 { 688 struct lpfc_sli *psli = &phba->sli; 689 690 spin_lock_irq(&phba->hbalock); 691 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 692 spin_unlock_irq(&phba->hbalock); 693 lpfc_offline_prep(phba); 694 695 lpfc_offline(phba); 696 lpfc_reset_barrier(phba); 697 lpfc_sli_brdreset(phba); 698 lpfc_hba_down_post(phba); 699 lpfc_sli_brdready(phba, HS_MBRDY); 700 lpfc_unblock_mgmt_io(phba); 701 phba->link_state = LPFC_HBA_ERROR; 702 return; 703 } 704 705 /************************************************************************/ 706 /* */ 707 /* lpfc_handle_eratt */ 708 /* This routine will handle processing a Host Attention */ 709 /* Error Status event. This will be initialized */ 710 /* as a SLI layer callback routine. */ 711 /* */ 712 /************************************************************************/ 713 void 714 lpfc_handle_eratt(struct lpfc_hba *phba) 715 { 716 struct lpfc_vport *vport = phba->pport; 717 struct lpfc_sli *psli = &phba->sli; 718 struct lpfc_sli_ring *pring; 719 uint32_t event_data; 720 unsigned long temperature; 721 struct temp_event temp_event_data; 722 struct Scsi_Host *shost; 723 724 /* If the pci channel is offline, ignore possible errors, 725 * since we cannot communicate with the pci card anyway. */ 726 if (pci_channel_offline(phba->pcidev)) 727 return; 728 /* If resets are disabled then leave the HBA alone and return */ 729 if (!phba->cfg_enable_hba_reset) 730 return; 731 732 if (phba->work_hs & HS_FFER6) { 733 /* Re-establishing Link */ 734 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 735 "1301 Re-establishing Link " 736 "Data: x%x x%x x%x\n", 737 phba->work_hs, 738 phba->work_status[0], phba->work_status[1]); 739 740 spin_lock_irq(&phba->hbalock); 741 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 742 spin_unlock_irq(&phba->hbalock); 743 744 /* 745 * Firmware stops when it triggled erratt with HS_FFER6. 746 * That could cause the I/Os dropped by the firmware. 747 * Error iocb (I/O) on txcmplq and let the SCSI layer 748 * retry it after re-establishing link. 749 */ 750 pring = &psli->ring[psli->fcp_ring]; 751 lpfc_sli_abort_iocb_ring(phba, pring); 752 753 /* 754 * There was a firmware error. Take the hba offline and then 755 * attempt to restart it. 756 */ 757 lpfc_offline_prep(phba); 758 lpfc_offline(phba); 759 lpfc_sli_brdrestart(phba); 760 if (lpfc_online(phba) == 0) { /* Initialize the HBA */ 761 lpfc_unblock_mgmt_io(phba); 762 return; 763 } 764 lpfc_unblock_mgmt_io(phba); 765 } else if (phba->work_hs & HS_CRIT_TEMP) { 766 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET); 767 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 768 temp_event_data.event_code = LPFC_CRIT_TEMP; 769 temp_event_data.data = (uint32_t)temperature; 770 771 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 772 "0459 Adapter maximum temperature exceeded " 773 "(%ld), taking this port offline " 774 "Data: x%x x%x x%x\n", 775 temperature, phba->work_hs, 776 phba->work_status[0], phba->work_status[1]); 777 778 shost = lpfc_shost_from_vport(phba->pport); 779 fc_host_post_vendor_event(shost, fc_get_event_number(), 780 sizeof(temp_event_data), 781 (char *) &temp_event_data, 782 SCSI_NL_VID_TYPE_PCI 783 | PCI_VENDOR_ID_EMULEX); 784 785 spin_lock_irq(&phba->hbalock); 786 phba->over_temp_state = HBA_OVER_TEMP; 787 spin_unlock_irq(&phba->hbalock); 788 lpfc_offline_eratt(phba); 789 790 } else { 791 /* The if clause above forces this code path when the status 792 * failure is a value other than FFER6. Do not call the offline 793 * twice. This is the adapter hardware error path. 794 */ 795 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 796 "0457 Adapter Hardware Error " 797 "Data: x%x x%x x%x\n", 798 phba->work_hs, 799 phba->work_status[0], phba->work_status[1]); 800 801 event_data = FC_REG_DUMP_EVENT; 802 shost = lpfc_shost_from_vport(vport); 803 fc_host_post_vendor_event(shost, fc_get_event_number(), 804 sizeof(event_data), (char *) &event_data, 805 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 806 807 lpfc_offline_eratt(phba); 808 } 809 } 810 811 /************************************************************************/ 812 /* */ 813 /* lpfc_handle_latt */ 814 /* This routine will handle processing a Host Attention */ 815 /* Link Status event. This will be initialized */ 816 /* as a SLI layer callback routine. */ 817 /* */ 818 /************************************************************************/ 819 void 820 lpfc_handle_latt(struct lpfc_hba *phba) 821 { 822 struct lpfc_vport *vport = phba->pport; 823 struct lpfc_sli *psli = &phba->sli; 824 LPFC_MBOXQ_t *pmb; 825 volatile uint32_t control; 826 struct lpfc_dmabuf *mp; 827 int rc = 0; 828 829 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 830 if (!pmb) { 831 rc = 1; 832 goto lpfc_handle_latt_err_exit; 833 } 834 835 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 836 if (!mp) { 837 rc = 2; 838 goto lpfc_handle_latt_free_pmb; 839 } 840 841 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 842 if (!mp->virt) { 843 rc = 3; 844 goto lpfc_handle_latt_free_mp; 845 } 846 847 /* Cleanup any outstanding ELS commands */ 848 lpfc_els_flush_all_cmd(phba); 849 850 psli->slistat.link_event++; 851 lpfc_read_la(phba, pmb, mp); 852 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la; 853 pmb->vport = vport; 854 /* Block ELS IOCBs until we have processed this mbox command */ 855 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 856 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 857 if (rc == MBX_NOT_FINISHED) { 858 rc = 4; 859 goto lpfc_handle_latt_free_mbuf; 860 } 861 862 /* Clear Link Attention in HA REG */ 863 spin_lock_irq(&phba->hbalock); 864 writel(HA_LATT, phba->HAregaddr); 865 readl(phba->HAregaddr); /* flush */ 866 spin_unlock_irq(&phba->hbalock); 867 868 return; 869 870 lpfc_handle_latt_free_mbuf: 871 phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 872 lpfc_mbuf_free(phba, mp->virt, mp->phys); 873 lpfc_handle_latt_free_mp: 874 kfree(mp); 875 lpfc_handle_latt_free_pmb: 876 mempool_free(pmb, phba->mbox_mem_pool); 877 lpfc_handle_latt_err_exit: 878 /* Enable Link attention interrupts */ 879 spin_lock_irq(&phba->hbalock); 880 psli->sli_flag |= LPFC_PROCESS_LA; 881 control = readl(phba->HCregaddr); 882 control |= HC_LAINT_ENA; 883 writel(control, phba->HCregaddr); 884 readl(phba->HCregaddr); /* flush */ 885 886 /* Clear Link Attention in HA REG */ 887 writel(HA_LATT, phba->HAregaddr); 888 readl(phba->HAregaddr); /* flush */ 889 spin_unlock_irq(&phba->hbalock); 890 lpfc_linkdown(phba); 891 phba->link_state = LPFC_HBA_ERROR; 892 893 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 894 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 895 896 return; 897 } 898 899 /************************************************************************/ 900 /* */ 901 /* lpfc_parse_vpd */ 902 /* This routine will parse the VPD data */ 903 /* */ 904 /************************************************************************/ 905 static int 906 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 907 { 908 uint8_t lenlo, lenhi; 909 int Length; 910 int i, j; 911 int finished = 0; 912 int index = 0; 913 914 if (!vpd) 915 return 0; 916 917 /* Vital Product */ 918 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 919 "0455 Vital Product Data: x%x x%x x%x x%x\n", 920 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 921 (uint32_t) vpd[3]); 922 while (!finished && (index < (len - 4))) { 923 switch (vpd[index]) { 924 case 0x82: 925 case 0x91: 926 index += 1; 927 lenlo = vpd[index]; 928 index += 1; 929 lenhi = vpd[index]; 930 index += 1; 931 i = ((((unsigned short)lenhi) << 8) + lenlo); 932 index += i; 933 break; 934 case 0x90: 935 index += 1; 936 lenlo = vpd[index]; 937 index += 1; 938 lenhi = vpd[index]; 939 index += 1; 940 Length = ((((unsigned short)lenhi) << 8) + lenlo); 941 if (Length > len - index) 942 Length = len - index; 943 while (Length > 0) { 944 /* Look for Serial Number */ 945 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 946 index += 2; 947 i = vpd[index]; 948 index += 1; 949 j = 0; 950 Length -= (3+i); 951 while(i--) { 952 phba->SerialNumber[j++] = vpd[index++]; 953 if (j == 31) 954 break; 955 } 956 phba->SerialNumber[j] = 0; 957 continue; 958 } 959 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 960 phba->vpd_flag |= VPD_MODEL_DESC; 961 index += 2; 962 i = vpd[index]; 963 index += 1; 964 j = 0; 965 Length -= (3+i); 966 while(i--) { 967 phba->ModelDesc[j++] = vpd[index++]; 968 if (j == 255) 969 break; 970 } 971 phba->ModelDesc[j] = 0; 972 continue; 973 } 974 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 975 phba->vpd_flag |= VPD_MODEL_NAME; 976 index += 2; 977 i = vpd[index]; 978 index += 1; 979 j = 0; 980 Length -= (3+i); 981 while(i--) { 982 phba->ModelName[j++] = vpd[index++]; 983 if (j == 79) 984 break; 985 } 986 phba->ModelName[j] = 0; 987 continue; 988 } 989 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 990 phba->vpd_flag |= VPD_PROGRAM_TYPE; 991 index += 2; 992 i = vpd[index]; 993 index += 1; 994 j = 0; 995 Length -= (3+i); 996 while(i--) { 997 phba->ProgramType[j++] = vpd[index++]; 998 if (j == 255) 999 break; 1000 } 1001 phba->ProgramType[j] = 0; 1002 continue; 1003 } 1004 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 1005 phba->vpd_flag |= VPD_PORT; 1006 index += 2; 1007 i = vpd[index]; 1008 index += 1; 1009 j = 0; 1010 Length -= (3+i); 1011 while(i--) { 1012 phba->Port[j++] = vpd[index++]; 1013 if (j == 19) 1014 break; 1015 } 1016 phba->Port[j] = 0; 1017 continue; 1018 } 1019 else { 1020 index += 2; 1021 i = vpd[index]; 1022 index += 1; 1023 index += i; 1024 Length -= (3 + i); 1025 } 1026 } 1027 finished = 0; 1028 break; 1029 case 0x78: 1030 finished = 1; 1031 break; 1032 default: 1033 index ++; 1034 break; 1035 } 1036 } 1037 1038 return(1); 1039 } 1040 1041 static void 1042 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 1043 { 1044 lpfc_vpd_t *vp; 1045 uint16_t dev_id = phba->pcidev->device; 1046 int max_speed; 1047 struct { 1048 char * name; 1049 int max_speed; 1050 char * bus; 1051 } m = {"<Unknown>", 0, ""}; 1052 1053 if (mdp && mdp[0] != '\0' 1054 && descp && descp[0] != '\0') 1055 return; 1056 1057 if (phba->lmt & LMT_10Gb) 1058 max_speed = 10; 1059 else if (phba->lmt & LMT_8Gb) 1060 max_speed = 8; 1061 else if (phba->lmt & LMT_4Gb) 1062 max_speed = 4; 1063 else if (phba->lmt & LMT_2Gb) 1064 max_speed = 2; 1065 else 1066 max_speed = 1; 1067 1068 vp = &phba->vpd; 1069 1070 switch (dev_id) { 1071 case PCI_DEVICE_ID_FIREFLY: 1072 m = (typeof(m)){"LP6000", max_speed, "PCI"}; 1073 break; 1074 case PCI_DEVICE_ID_SUPERFLY: 1075 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 1076 m = (typeof(m)){"LP7000", max_speed, "PCI"}; 1077 else 1078 m = (typeof(m)){"LP7000E", max_speed, "PCI"}; 1079 break; 1080 case PCI_DEVICE_ID_DRAGONFLY: 1081 m = (typeof(m)){"LP8000", max_speed, "PCI"}; 1082 break; 1083 case PCI_DEVICE_ID_CENTAUR: 1084 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 1085 m = (typeof(m)){"LP9002", max_speed, "PCI"}; 1086 else 1087 m = (typeof(m)){"LP9000", max_speed, "PCI"}; 1088 break; 1089 case PCI_DEVICE_ID_RFLY: 1090 m = (typeof(m)){"LP952", max_speed, "PCI"}; 1091 break; 1092 case PCI_DEVICE_ID_PEGASUS: 1093 m = (typeof(m)){"LP9802", max_speed, "PCI-X"}; 1094 break; 1095 case PCI_DEVICE_ID_THOR: 1096 m = (typeof(m)){"LP10000", max_speed, "PCI-X"}; 1097 break; 1098 case PCI_DEVICE_ID_VIPER: 1099 m = (typeof(m)){"LPX1000", max_speed, "PCI-X"}; 1100 break; 1101 case PCI_DEVICE_ID_PFLY: 1102 m = (typeof(m)){"LP982", max_speed, "PCI-X"}; 1103 break; 1104 case PCI_DEVICE_ID_TFLY: 1105 m = (typeof(m)){"LP1050", max_speed, "PCI-X"}; 1106 break; 1107 case PCI_DEVICE_ID_HELIOS: 1108 m = (typeof(m)){"LP11000", max_speed, "PCI-X2"}; 1109 break; 1110 case PCI_DEVICE_ID_HELIOS_SCSP: 1111 m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"}; 1112 break; 1113 case PCI_DEVICE_ID_HELIOS_DCSP: 1114 m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"}; 1115 break; 1116 case PCI_DEVICE_ID_NEPTUNE: 1117 m = (typeof(m)){"LPe1000", max_speed, "PCIe"}; 1118 break; 1119 case PCI_DEVICE_ID_NEPTUNE_SCSP: 1120 m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"}; 1121 break; 1122 case PCI_DEVICE_ID_NEPTUNE_DCSP: 1123 m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"}; 1124 break; 1125 case PCI_DEVICE_ID_BMID: 1126 m = (typeof(m)){"LP1150", max_speed, "PCI-X2"}; 1127 break; 1128 case PCI_DEVICE_ID_BSMB: 1129 m = (typeof(m)){"LP111", max_speed, "PCI-X2"}; 1130 break; 1131 case PCI_DEVICE_ID_ZEPHYR: 1132 m = (typeof(m)){"LPe11000", max_speed, "PCIe"}; 1133 break; 1134 case PCI_DEVICE_ID_ZEPHYR_SCSP: 1135 m = (typeof(m)){"LPe11000", max_speed, "PCIe"}; 1136 break; 1137 case PCI_DEVICE_ID_ZEPHYR_DCSP: 1138 m = (typeof(m)){"LPe11002-SP", max_speed, "PCIe"}; 1139 break; 1140 case PCI_DEVICE_ID_ZMID: 1141 m = (typeof(m)){"LPe1150", max_speed, "PCIe"}; 1142 break; 1143 case PCI_DEVICE_ID_ZSMB: 1144 m = (typeof(m)){"LPe111", max_speed, "PCIe"}; 1145 break; 1146 case PCI_DEVICE_ID_LP101: 1147 m = (typeof(m)){"LP101", max_speed, "PCI-X"}; 1148 break; 1149 case PCI_DEVICE_ID_LP10000S: 1150 m = (typeof(m)){"LP10000-S", max_speed, "PCI"}; 1151 break; 1152 case PCI_DEVICE_ID_LP11000S: 1153 m = (typeof(m)){"LP11000-S", max_speed, 1154 "PCI-X2"}; 1155 break; 1156 case PCI_DEVICE_ID_LPE11000S: 1157 m = (typeof(m)){"LPe11000-S", max_speed, 1158 "PCIe"}; 1159 break; 1160 case PCI_DEVICE_ID_SAT: 1161 m = (typeof(m)){"LPe12000", max_speed, "PCIe"}; 1162 break; 1163 case PCI_DEVICE_ID_SAT_MID: 1164 m = (typeof(m)){"LPe1250", max_speed, "PCIe"}; 1165 break; 1166 case PCI_DEVICE_ID_SAT_SMB: 1167 m = (typeof(m)){"LPe121", max_speed, "PCIe"}; 1168 break; 1169 case PCI_DEVICE_ID_SAT_DCSP: 1170 m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"}; 1171 break; 1172 case PCI_DEVICE_ID_SAT_SCSP: 1173 m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"}; 1174 break; 1175 case PCI_DEVICE_ID_SAT_S: 1176 m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"}; 1177 break; 1178 default: 1179 m = (typeof(m)){ NULL }; 1180 break; 1181 } 1182 1183 if (mdp && mdp[0] == '\0') 1184 snprintf(mdp, 79,"%s", m.name); 1185 if (descp && descp[0] == '\0') 1186 snprintf(descp, 255, 1187 "Emulex %s %dGb %s Fibre Channel Adapter", 1188 m.name, m.max_speed, m.bus); 1189 } 1190 1191 /**************************************************/ 1192 /* lpfc_post_buffer */ 1193 /* */ 1194 /* This routine will post count buffers to the */ 1195 /* ring with the QUE_RING_BUF_CN command. This */ 1196 /* allows 3 buffers / command to be posted. */ 1197 /* Returns the number of buffers NOT posted. */ 1198 /**************************************************/ 1199 int 1200 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt, 1201 int type) 1202 { 1203 IOCB_t *icmd; 1204 struct lpfc_iocbq *iocb; 1205 struct lpfc_dmabuf *mp1, *mp2; 1206 1207 cnt += pring->missbufcnt; 1208 1209 /* While there are buffers to post */ 1210 while (cnt > 0) { 1211 /* Allocate buffer for command iocb */ 1212 iocb = lpfc_sli_get_iocbq(phba); 1213 if (iocb == NULL) { 1214 pring->missbufcnt = cnt; 1215 return cnt; 1216 } 1217 icmd = &iocb->iocb; 1218 1219 /* 2 buffers can be posted per command */ 1220 /* Allocate buffer to post */ 1221 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 1222 if (mp1) 1223 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 1224 if (!mp1 || !mp1->virt) { 1225 kfree(mp1); 1226 lpfc_sli_release_iocbq(phba, iocb); 1227 pring->missbufcnt = cnt; 1228 return cnt; 1229 } 1230 1231 INIT_LIST_HEAD(&mp1->list); 1232 /* Allocate buffer to post */ 1233 if (cnt > 1) { 1234 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 1235 if (mp2) 1236 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 1237 &mp2->phys); 1238 if (!mp2 || !mp2->virt) { 1239 kfree(mp2); 1240 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1241 kfree(mp1); 1242 lpfc_sli_release_iocbq(phba, iocb); 1243 pring->missbufcnt = cnt; 1244 return cnt; 1245 } 1246 1247 INIT_LIST_HEAD(&mp2->list); 1248 } else { 1249 mp2 = NULL; 1250 } 1251 1252 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 1253 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 1254 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 1255 icmd->ulpBdeCount = 1; 1256 cnt--; 1257 if (mp2) { 1258 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 1259 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 1260 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 1261 cnt--; 1262 icmd->ulpBdeCount = 2; 1263 } 1264 1265 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 1266 icmd->ulpLe = 1; 1267 1268 if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) { 1269 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1270 kfree(mp1); 1271 cnt++; 1272 if (mp2) { 1273 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 1274 kfree(mp2); 1275 cnt++; 1276 } 1277 lpfc_sli_release_iocbq(phba, iocb); 1278 pring->missbufcnt = cnt; 1279 return cnt; 1280 } 1281 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 1282 if (mp2) 1283 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 1284 } 1285 pring->missbufcnt = 0; 1286 return 0; 1287 } 1288 1289 /************************************************************************/ 1290 /* */ 1291 /* lpfc_post_rcv_buf */ 1292 /* This routine post initial rcv buffers to the configured rings */ 1293 /* */ 1294 /************************************************************************/ 1295 static int 1296 lpfc_post_rcv_buf(struct lpfc_hba *phba) 1297 { 1298 struct lpfc_sli *psli = &phba->sli; 1299 1300 /* Ring 0, ELS / CT buffers */ 1301 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0, 1); 1302 /* Ring 2 - FCP no buffers needed */ 1303 1304 return 0; 1305 } 1306 1307 #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 1308 1309 /************************************************************************/ 1310 /* */ 1311 /* lpfc_sha_init */ 1312 /* */ 1313 /************************************************************************/ 1314 static void 1315 lpfc_sha_init(uint32_t * HashResultPointer) 1316 { 1317 HashResultPointer[0] = 0x67452301; 1318 HashResultPointer[1] = 0xEFCDAB89; 1319 HashResultPointer[2] = 0x98BADCFE; 1320 HashResultPointer[3] = 0x10325476; 1321 HashResultPointer[4] = 0xC3D2E1F0; 1322 } 1323 1324 /************************************************************************/ 1325 /* */ 1326 /* lpfc_sha_iterate */ 1327 /* */ 1328 /************************************************************************/ 1329 static void 1330 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) 1331 { 1332 int t; 1333 uint32_t TEMP; 1334 uint32_t A, B, C, D, E; 1335 t = 16; 1336 do { 1337 HashWorkingPointer[t] = 1338 S(1, 1339 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 1340 8] ^ 1341 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]); 1342 } while (++t <= 79); 1343 t = 0; 1344 A = HashResultPointer[0]; 1345 B = HashResultPointer[1]; 1346 C = HashResultPointer[2]; 1347 D = HashResultPointer[3]; 1348 E = HashResultPointer[4]; 1349 1350 do { 1351 if (t < 20) { 1352 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999; 1353 } else if (t < 40) { 1354 TEMP = (B ^ C ^ D) + 0x6ED9EBA1; 1355 } else if (t < 60) { 1356 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC; 1357 } else { 1358 TEMP = (B ^ C ^ D) + 0xCA62C1D6; 1359 } 1360 TEMP += S(5, A) + E + HashWorkingPointer[t]; 1361 E = D; 1362 D = C; 1363 C = S(30, B); 1364 B = A; 1365 A = TEMP; 1366 } while (++t <= 79); 1367 1368 HashResultPointer[0] += A; 1369 HashResultPointer[1] += B; 1370 HashResultPointer[2] += C; 1371 HashResultPointer[3] += D; 1372 HashResultPointer[4] += E; 1373 1374 } 1375 1376 /************************************************************************/ 1377 /* */ 1378 /* lpfc_challenge_key */ 1379 /* */ 1380 /************************************************************************/ 1381 static void 1382 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) 1383 { 1384 *HashWorking = (*RandomChallenge ^ *HashWorking); 1385 } 1386 1387 /************************************************************************/ 1388 /* */ 1389 /* lpfc_hba_init */ 1390 /* */ 1391 /************************************************************************/ 1392 void 1393 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 1394 { 1395 int t; 1396 uint32_t *HashWorking; 1397 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 1398 1399 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 1400 if (!HashWorking) 1401 return; 1402 1403 HashWorking[0] = HashWorking[78] = *pwwnn++; 1404 HashWorking[1] = HashWorking[79] = *pwwnn; 1405 1406 for (t = 0; t < 7; t++) 1407 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 1408 1409 lpfc_sha_init(hbainit); 1410 lpfc_sha_iterate(hbainit, HashWorking); 1411 kfree(HashWorking); 1412 } 1413 1414 void 1415 lpfc_cleanup(struct lpfc_vport *vport) 1416 { 1417 struct lpfc_hba *phba = vport->phba; 1418 struct lpfc_nodelist *ndlp, *next_ndlp; 1419 int i = 0; 1420 1421 if (phba->link_state > LPFC_LINK_DOWN) 1422 lpfc_port_link_failure(vport); 1423 1424 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 1425 if (!NLP_CHK_NODE_ACT(ndlp)) { 1426 ndlp = lpfc_enable_node(vport, ndlp, 1427 NLP_STE_UNUSED_NODE); 1428 if (!ndlp) 1429 continue; 1430 spin_lock_irq(&phba->ndlp_lock); 1431 NLP_SET_FREE_REQ(ndlp); 1432 spin_unlock_irq(&phba->ndlp_lock); 1433 /* Trigger the release of the ndlp memory */ 1434 lpfc_nlp_put(ndlp); 1435 continue; 1436 } 1437 spin_lock_irq(&phba->ndlp_lock); 1438 if (NLP_CHK_FREE_REQ(ndlp)) { 1439 /* The ndlp should not be in memory free mode already */ 1440 spin_unlock_irq(&phba->ndlp_lock); 1441 continue; 1442 } else 1443 /* Indicate request for freeing ndlp memory */ 1444 NLP_SET_FREE_REQ(ndlp); 1445 spin_unlock_irq(&phba->ndlp_lock); 1446 1447 if (vport->port_type != LPFC_PHYSICAL_PORT && 1448 ndlp->nlp_DID == Fabric_DID) { 1449 /* Just free up ndlp with Fabric_DID for vports */ 1450 lpfc_nlp_put(ndlp); 1451 continue; 1452 } 1453 1454 if (ndlp->nlp_type & NLP_FABRIC) 1455 lpfc_disc_state_machine(vport, ndlp, NULL, 1456 NLP_EVT_DEVICE_RECOVERY); 1457 1458 lpfc_disc_state_machine(vport, ndlp, NULL, 1459 NLP_EVT_DEVICE_RM); 1460 } 1461 1462 /* At this point, ALL ndlp's should be gone 1463 * because of the previous NLP_EVT_DEVICE_RM. 1464 * Lets wait for this to happen, if needed. 1465 */ 1466 while (!list_empty(&vport->fc_nodes)) { 1467 1468 if (i++ > 3000) { 1469 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 1470 "0233 Nodelist not empty\n"); 1471 list_for_each_entry_safe(ndlp, next_ndlp, 1472 &vport->fc_nodes, nlp_listp) { 1473 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 1474 LOG_NODE, 1475 "0282: did:x%x ndlp:x%p " 1476 "usgmap:x%x refcnt:%d\n", 1477 ndlp->nlp_DID, (void *)ndlp, 1478 ndlp->nlp_usg_map, 1479 atomic_read( 1480 &ndlp->kref.refcount)); 1481 } 1482 break; 1483 } 1484 1485 /* Wait for any activity on ndlps to settle */ 1486 msleep(10); 1487 } 1488 return; 1489 } 1490 1491 void 1492 lpfc_stop_vport_timers(struct lpfc_vport *vport) 1493 { 1494 del_timer_sync(&vport->els_tmofunc); 1495 del_timer_sync(&vport->fc_fdmitmo); 1496 lpfc_can_disctmo(vport); 1497 return; 1498 } 1499 1500 static void 1501 lpfc_stop_phba_timers(struct lpfc_hba *phba) 1502 { 1503 del_timer_sync(&phba->fcp_poll_timer); 1504 lpfc_stop_vport_timers(phba->pport); 1505 del_timer_sync(&phba->sli.mbox_tmo); 1506 del_timer_sync(&phba->fabric_block_timer); 1507 phba->hb_outstanding = 0; 1508 del_timer_sync(&phba->hb_tmofunc); 1509 return; 1510 } 1511 1512 static void 1513 lpfc_block_mgmt_io(struct lpfc_hba * phba) 1514 { 1515 unsigned long iflag; 1516 1517 spin_lock_irqsave(&phba->hbalock, iflag); 1518 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 1519 spin_unlock_irqrestore(&phba->hbalock, iflag); 1520 } 1521 1522 int 1523 lpfc_online(struct lpfc_hba *phba) 1524 { 1525 struct lpfc_vport *vport = phba->pport; 1526 struct lpfc_vport **vports; 1527 int i; 1528 1529 if (!phba) 1530 return 0; 1531 1532 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 1533 return 0; 1534 1535 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1536 "0458 Bring Adapter online\n"); 1537 1538 lpfc_block_mgmt_io(phba); 1539 1540 if (!lpfc_sli_queue_setup(phba)) { 1541 lpfc_unblock_mgmt_io(phba); 1542 return 1; 1543 } 1544 1545 if (lpfc_sli_hba_setup(phba)) { /* Initialize the HBA */ 1546 lpfc_unblock_mgmt_io(phba); 1547 return 1; 1548 } 1549 1550 vports = lpfc_create_vport_work_array(phba); 1551 if (vports != NULL) 1552 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 1553 struct Scsi_Host *shost; 1554 shost = lpfc_shost_from_vport(vports[i]); 1555 spin_lock_irq(shost->host_lock); 1556 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 1557 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 1558 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 1559 spin_unlock_irq(shost->host_lock); 1560 } 1561 lpfc_destroy_vport_work_array(phba, vports); 1562 1563 lpfc_unblock_mgmt_io(phba); 1564 return 0; 1565 } 1566 1567 void 1568 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 1569 { 1570 unsigned long iflag; 1571 1572 spin_lock_irqsave(&phba->hbalock, iflag); 1573 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 1574 spin_unlock_irqrestore(&phba->hbalock, iflag); 1575 } 1576 1577 void 1578 lpfc_offline_prep(struct lpfc_hba * phba) 1579 { 1580 struct lpfc_vport *vport = phba->pport; 1581 struct lpfc_nodelist *ndlp, *next_ndlp; 1582 struct lpfc_vport **vports; 1583 int i; 1584 1585 if (vport->fc_flag & FC_OFFLINE_MODE) 1586 return; 1587 1588 lpfc_block_mgmt_io(phba); 1589 1590 lpfc_linkdown(phba); 1591 1592 /* Issue an unreg_login to all nodes on all vports */ 1593 vports = lpfc_create_vport_work_array(phba); 1594 if (vports != NULL) { 1595 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 1596 struct Scsi_Host *shost; 1597 1598 if (vports[i]->load_flag & FC_UNLOADING) 1599 continue; 1600 shost = lpfc_shost_from_vport(vports[i]); 1601 list_for_each_entry_safe(ndlp, next_ndlp, 1602 &vports[i]->fc_nodes, 1603 nlp_listp) { 1604 if (!NLP_CHK_NODE_ACT(ndlp)) 1605 continue; 1606 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 1607 continue; 1608 if (ndlp->nlp_type & NLP_FABRIC) { 1609 lpfc_disc_state_machine(vports[i], ndlp, 1610 NULL, NLP_EVT_DEVICE_RECOVERY); 1611 lpfc_disc_state_machine(vports[i], ndlp, 1612 NULL, NLP_EVT_DEVICE_RM); 1613 } 1614 spin_lock_irq(shost->host_lock); 1615 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 1616 spin_unlock_irq(shost->host_lock); 1617 lpfc_unreg_rpi(vports[i], ndlp); 1618 } 1619 } 1620 } 1621 lpfc_destroy_vport_work_array(phba, vports); 1622 1623 lpfc_sli_flush_mbox_queue(phba); 1624 } 1625 1626 void 1627 lpfc_offline(struct lpfc_hba *phba) 1628 { 1629 struct Scsi_Host *shost; 1630 struct lpfc_vport **vports; 1631 int i; 1632 1633 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 1634 return; 1635 1636 /* stop all timers associated with this hba */ 1637 lpfc_stop_phba_timers(phba); 1638 vports = lpfc_create_vport_work_array(phba); 1639 if (vports != NULL) 1640 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) 1641 lpfc_stop_vport_timers(vports[i]); 1642 lpfc_destroy_vport_work_array(phba, vports); 1643 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1644 "0460 Bring Adapter offline\n"); 1645 /* Bring down the SLI Layer and cleanup. The HBA is offline 1646 now. */ 1647 lpfc_sli_hba_down(phba); 1648 spin_lock_irq(&phba->hbalock); 1649 phba->work_ha = 0; 1650 spin_unlock_irq(&phba->hbalock); 1651 vports = lpfc_create_vport_work_array(phba); 1652 if (vports != NULL) 1653 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 1654 shost = lpfc_shost_from_vport(vports[i]); 1655 spin_lock_irq(shost->host_lock); 1656 vports[i]->work_port_events = 0; 1657 vports[i]->fc_flag |= FC_OFFLINE_MODE; 1658 spin_unlock_irq(shost->host_lock); 1659 } 1660 lpfc_destroy_vport_work_array(phba, vports); 1661 } 1662 1663 /****************************************************************************** 1664 * Function name: lpfc_scsi_free 1665 * 1666 * Description: Called from lpfc_pci_remove_one free internal driver resources 1667 * 1668 ******************************************************************************/ 1669 static int 1670 lpfc_scsi_free(struct lpfc_hba *phba) 1671 { 1672 struct lpfc_scsi_buf *sb, *sb_next; 1673 struct lpfc_iocbq *io, *io_next; 1674 1675 spin_lock_irq(&phba->hbalock); 1676 /* Release all the lpfc_scsi_bufs maintained by this host. */ 1677 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 1678 list_del(&sb->list); 1679 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 1680 sb->dma_handle); 1681 kfree(sb); 1682 phba->total_scsi_bufs--; 1683 } 1684 1685 /* Release all the lpfc_iocbq entries maintained by this host. */ 1686 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 1687 list_del(&io->list); 1688 kfree(io); 1689 phba->total_iocbq_bufs--; 1690 } 1691 1692 spin_unlock_irq(&phba->hbalock); 1693 1694 return 0; 1695 } 1696 1697 struct lpfc_vport * 1698 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 1699 { 1700 struct lpfc_vport *vport; 1701 struct Scsi_Host *shost; 1702 int error = 0; 1703 1704 if (dev != &phba->pcidev->dev) 1705 shost = scsi_host_alloc(&lpfc_vport_template, 1706 sizeof(struct lpfc_vport)); 1707 else 1708 shost = scsi_host_alloc(&lpfc_template, 1709 sizeof(struct lpfc_vport)); 1710 if (!shost) 1711 goto out; 1712 1713 vport = (struct lpfc_vport *) shost->hostdata; 1714 vport->phba = phba; 1715 vport->load_flag |= FC_LOADING; 1716 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 1717 vport->fc_rscn_flush = 0; 1718 1719 lpfc_get_vport_cfgparam(vport); 1720 shost->unique_id = instance; 1721 shost->max_id = LPFC_MAX_TARGET; 1722 shost->max_lun = vport->cfg_max_luns; 1723 shost->this_id = -1; 1724 shost->max_cmd_len = 16; 1725 /* 1726 * Set initial can_queue value since 0 is no longer supported and 1727 * scsi_add_host will fail. This will be adjusted later based on the 1728 * max xri value determined in hba setup. 1729 */ 1730 shost->can_queue = phba->cfg_hba_queue_depth - 10; 1731 if (dev != &phba->pcidev->dev) { 1732 shost->transportt = lpfc_vport_transport_template; 1733 vport->port_type = LPFC_NPIV_PORT; 1734 } else { 1735 shost->transportt = lpfc_transport_template; 1736 vport->port_type = LPFC_PHYSICAL_PORT; 1737 } 1738 1739 /* Initialize all internally managed lists. */ 1740 INIT_LIST_HEAD(&vport->fc_nodes); 1741 spin_lock_init(&vport->work_port_lock); 1742 1743 init_timer(&vport->fc_disctmo); 1744 vport->fc_disctmo.function = lpfc_disc_timeout; 1745 vport->fc_disctmo.data = (unsigned long)vport; 1746 1747 init_timer(&vport->fc_fdmitmo); 1748 vport->fc_fdmitmo.function = lpfc_fdmi_tmo; 1749 vport->fc_fdmitmo.data = (unsigned long)vport; 1750 1751 init_timer(&vport->els_tmofunc); 1752 vport->els_tmofunc.function = lpfc_els_timeout; 1753 vport->els_tmofunc.data = (unsigned long)vport; 1754 1755 error = scsi_add_host(shost, dev); 1756 if (error) 1757 goto out_put_shost; 1758 1759 spin_lock_irq(&phba->hbalock); 1760 list_add_tail(&vport->listentry, &phba->port_list); 1761 spin_unlock_irq(&phba->hbalock); 1762 return vport; 1763 1764 out_put_shost: 1765 scsi_host_put(shost); 1766 out: 1767 return NULL; 1768 } 1769 1770 void 1771 destroy_port(struct lpfc_vport *vport) 1772 { 1773 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1774 struct lpfc_hba *phba = vport->phba; 1775 1776 kfree(vport->vname); 1777 1778 lpfc_debugfs_terminate(vport); 1779 fc_remove_host(shost); 1780 scsi_remove_host(shost); 1781 1782 spin_lock_irq(&phba->hbalock); 1783 list_del_init(&vport->listentry); 1784 spin_unlock_irq(&phba->hbalock); 1785 1786 lpfc_cleanup(vport); 1787 return; 1788 } 1789 1790 int 1791 lpfc_get_instance(void) 1792 { 1793 int instance = 0; 1794 1795 /* Assign an unused number */ 1796 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL)) 1797 return -1; 1798 if (idr_get_new(&lpfc_hba_index, NULL, &instance)) 1799 return -1; 1800 return instance; 1801 } 1802 1803 /* 1804 * Note: there is no scan_start function as adapter initialization 1805 * will have asynchronously kicked off the link initialization. 1806 */ 1807 1808 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 1809 { 1810 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1811 struct lpfc_hba *phba = vport->phba; 1812 int stat = 0; 1813 1814 spin_lock_irq(shost->host_lock); 1815 1816 if (vport->load_flag & FC_UNLOADING) { 1817 stat = 1; 1818 goto finished; 1819 } 1820 if (time >= 30 * HZ) { 1821 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1822 "0461 Scanning longer than 30 " 1823 "seconds. Continuing initialization\n"); 1824 stat = 1; 1825 goto finished; 1826 } 1827 if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) { 1828 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1829 "0465 Link down longer than 15 " 1830 "seconds. Continuing initialization\n"); 1831 stat = 1; 1832 goto finished; 1833 } 1834 1835 if (vport->port_state != LPFC_VPORT_READY) 1836 goto finished; 1837 if (vport->num_disc_nodes || vport->fc_prli_sent) 1838 goto finished; 1839 if (vport->fc_map_cnt == 0 && time < 2 * HZ) 1840 goto finished; 1841 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 1842 goto finished; 1843 1844 stat = 1; 1845 1846 finished: 1847 spin_unlock_irq(shost->host_lock); 1848 return stat; 1849 } 1850 1851 void lpfc_host_attrib_init(struct Scsi_Host *shost) 1852 { 1853 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 1854 struct lpfc_hba *phba = vport->phba; 1855 /* 1856 * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). 1857 */ 1858 1859 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 1860 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 1861 fc_host_supported_classes(shost) = FC_COS_CLASS3; 1862 1863 memset(fc_host_supported_fc4s(shost), 0, 1864 sizeof(fc_host_supported_fc4s(shost))); 1865 fc_host_supported_fc4s(shost)[2] = 1; 1866 fc_host_supported_fc4s(shost)[7] = 1; 1867 1868 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 1869 sizeof fc_host_symbolic_name(shost)); 1870 1871 fc_host_supported_speeds(shost) = 0; 1872 if (phba->lmt & LMT_10Gb) 1873 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 1874 if (phba->lmt & LMT_8Gb) 1875 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 1876 if (phba->lmt & LMT_4Gb) 1877 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 1878 if (phba->lmt & LMT_2Gb) 1879 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 1880 if (phba->lmt & LMT_1Gb) 1881 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 1882 1883 fc_host_maxframe_size(shost) = 1884 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 1885 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 1886 1887 /* This value is also unchanging */ 1888 memset(fc_host_active_fc4s(shost), 0, 1889 sizeof(fc_host_active_fc4s(shost))); 1890 fc_host_active_fc4s(shost)[2] = 1; 1891 fc_host_active_fc4s(shost)[7] = 1; 1892 1893 fc_host_max_npiv_vports(shost) = phba->max_vpi; 1894 spin_lock_irq(shost->host_lock); 1895 vport->load_flag &= ~FC_LOADING; 1896 spin_unlock_irq(shost->host_lock); 1897 } 1898 1899 static int 1900 lpfc_enable_msix(struct lpfc_hba *phba) 1901 { 1902 int error; 1903 1904 phba->msix_entries[0].entry = 0; 1905 phba->msix_entries[0].vector = 0; 1906 1907 error = pci_enable_msix(phba->pcidev, phba->msix_entries, 1908 ARRAY_SIZE(phba->msix_entries)); 1909 if (error) { 1910 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1911 "0420 Enable MSI-X failed (%d), continuing " 1912 "with MSI\n", error); 1913 pci_disable_msix(phba->pcidev); 1914 return error; 1915 } 1916 1917 error = request_irq(phba->msix_entries[0].vector, lpfc_intr_handler, 0, 1918 LPFC_DRIVER_NAME, phba); 1919 if (error) { 1920 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1921 "0421 MSI-X request_irq failed (%d), " 1922 "continuing with MSI\n", error); 1923 pci_disable_msix(phba->pcidev); 1924 } 1925 return error; 1926 } 1927 1928 static void 1929 lpfc_disable_msix(struct lpfc_hba *phba) 1930 { 1931 free_irq(phba->msix_entries[0].vector, phba); 1932 pci_disable_msix(phba->pcidev); 1933 } 1934 1935 static int __devinit 1936 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 1937 { 1938 struct lpfc_vport *vport = NULL; 1939 struct lpfc_hba *phba; 1940 struct lpfc_sli *psli; 1941 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 1942 struct Scsi_Host *shost = NULL; 1943 void *ptr; 1944 unsigned long bar0map_len, bar2map_len; 1945 int error = -ENODEV, retval; 1946 int i, hbq_count; 1947 uint16_t iotag; 1948 int bars = pci_select_bars(pdev, IORESOURCE_MEM); 1949 1950 if (pci_enable_device_mem(pdev)) 1951 goto out; 1952 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME)) 1953 goto out_disable_device; 1954 1955 phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL); 1956 if (!phba) 1957 goto out_release_regions; 1958 1959 spin_lock_init(&phba->hbalock); 1960 1961 /* Initialize ndlp management spinlock */ 1962 spin_lock_init(&phba->ndlp_lock); 1963 1964 phba->pcidev = pdev; 1965 1966 /* Assign an unused board number */ 1967 if ((phba->brd_no = lpfc_get_instance()) < 0) 1968 goto out_free_phba; 1969 1970 INIT_LIST_HEAD(&phba->port_list); 1971 /* 1972 * Get all the module params for configuring this host and then 1973 * establish the host. 1974 */ 1975 lpfc_get_cfgparam(phba); 1976 phba->max_vpi = LPFC_MAX_VPI; 1977 1978 /* Initialize timers used by driver */ 1979 init_timer(&phba->hb_tmofunc); 1980 phba->hb_tmofunc.function = lpfc_hb_timeout; 1981 phba->hb_tmofunc.data = (unsigned long)phba; 1982 1983 psli = &phba->sli; 1984 init_timer(&psli->mbox_tmo); 1985 psli->mbox_tmo.function = lpfc_mbox_timeout; 1986 psli->mbox_tmo.data = (unsigned long) phba; 1987 init_timer(&phba->fcp_poll_timer); 1988 phba->fcp_poll_timer.function = lpfc_poll_timeout; 1989 phba->fcp_poll_timer.data = (unsigned long) phba; 1990 init_timer(&phba->fabric_block_timer); 1991 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 1992 phba->fabric_block_timer.data = (unsigned long) phba; 1993 1994 pci_set_master(pdev); 1995 pci_try_set_mwi(pdev); 1996 1997 if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0) 1998 if (pci_set_dma_mask(phba->pcidev, DMA_32BIT_MASK) != 0) 1999 goto out_idr_remove; 2000 2001 /* 2002 * Get the bus address of Bar0 and Bar2 and the number of bytes 2003 * required by each mapping. 2004 */ 2005 phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0); 2006 bar0map_len = pci_resource_len(phba->pcidev, 0); 2007 2008 phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2); 2009 bar2map_len = pci_resource_len(phba->pcidev, 2); 2010 2011 /* Map HBA SLIM to a kernel virtual address. */ 2012 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 2013 if (!phba->slim_memmap_p) { 2014 error = -ENODEV; 2015 dev_printk(KERN_ERR, &pdev->dev, 2016 "ioremap failed for SLIM memory.\n"); 2017 goto out_idr_remove; 2018 } 2019 2020 /* Map HBA Control Registers to a kernel virtual address. */ 2021 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 2022 if (!phba->ctrl_regs_memmap_p) { 2023 error = -ENODEV; 2024 dev_printk(KERN_ERR, &pdev->dev, 2025 "ioremap failed for HBA control registers.\n"); 2026 goto out_iounmap_slim; 2027 } 2028 2029 /* Allocate memory for SLI-2 structures */ 2030 phba->slim2p = dma_alloc_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE, 2031 &phba->slim2p_mapping, GFP_KERNEL); 2032 if (!phba->slim2p) 2033 goto out_iounmap; 2034 2035 memset(phba->slim2p, 0, SLI2_SLIM_SIZE); 2036 2037 phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev, 2038 lpfc_sli_hbq_size(), 2039 &phba->hbqslimp.phys, 2040 GFP_KERNEL); 2041 if (!phba->hbqslimp.virt) 2042 goto out_free_slim; 2043 2044 hbq_count = lpfc_sli_hbq_count(); 2045 ptr = phba->hbqslimp.virt; 2046 for (i = 0; i < hbq_count; ++i) { 2047 phba->hbqs[i].hbq_virt = ptr; 2048 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 2049 ptr += (lpfc_hbq_defs[i]->entry_count * 2050 sizeof(struct lpfc_hbq_entry)); 2051 } 2052 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 2053 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 2054 2055 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 2056 2057 INIT_LIST_HEAD(&phba->hbqbuf_in_list); 2058 2059 /* Initialize the SLI Layer to run with lpfc HBAs. */ 2060 lpfc_sli_setup(phba); 2061 lpfc_sli_queue_setup(phba); 2062 2063 retval = lpfc_mem_alloc(phba); 2064 if (retval) { 2065 error = retval; 2066 goto out_free_hbqslimp; 2067 } 2068 2069 /* Initialize and populate the iocb list per host. */ 2070 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 2071 for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) { 2072 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 2073 if (iocbq_entry == NULL) { 2074 printk(KERN_ERR "%s: only allocated %d iocbs of " 2075 "expected %d count. Unloading driver.\n", 2076 __FUNCTION__, i, LPFC_IOCB_LIST_CNT); 2077 error = -ENOMEM; 2078 goto out_free_iocbq; 2079 } 2080 2081 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 2082 if (iotag == 0) { 2083 kfree (iocbq_entry); 2084 printk(KERN_ERR "%s: failed to allocate IOTAG. " 2085 "Unloading driver.\n", 2086 __FUNCTION__); 2087 error = -ENOMEM; 2088 goto out_free_iocbq; 2089 } 2090 2091 spin_lock_irq(&phba->hbalock); 2092 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 2093 phba->total_iocbq_bufs++; 2094 spin_unlock_irq(&phba->hbalock); 2095 } 2096 2097 /* Initialize HBA structure */ 2098 phba->fc_edtov = FF_DEF_EDTOV; 2099 phba->fc_ratov = FF_DEF_RATOV; 2100 phba->fc_altov = FF_DEF_ALTOV; 2101 phba->fc_arbtov = FF_DEF_ARBTOV; 2102 2103 INIT_LIST_HEAD(&phba->work_list); 2104 phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT); 2105 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 2106 2107 /* Initialize the wait queue head for the kernel thread */ 2108 init_waitqueue_head(&phba->work_waitq); 2109 2110 /* Startup the kernel thread for this host adapter. */ 2111 phba->worker_thread = kthread_run(lpfc_do_work, phba, 2112 "lpfc_worker_%d", phba->brd_no); 2113 if (IS_ERR(phba->worker_thread)) { 2114 error = PTR_ERR(phba->worker_thread); 2115 goto out_free_iocbq; 2116 } 2117 2118 /* Initialize the list of scsi buffers used by driver for scsi IO. */ 2119 spin_lock_init(&phba->scsi_buf_list_lock); 2120 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); 2121 2122 /* Initialize list of fabric iocbs */ 2123 INIT_LIST_HEAD(&phba->fabric_iocb_list); 2124 2125 /* Initialize list to save ELS buffers */ 2126 INIT_LIST_HEAD(&phba->elsbuf); 2127 2128 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 2129 if (!vport) 2130 goto out_kthread_stop; 2131 2132 shost = lpfc_shost_from_vport(vport); 2133 phba->pport = vport; 2134 lpfc_debugfs_initialize(vport); 2135 2136 pci_set_drvdata(pdev, shost); 2137 phba->intr_type = NONE; 2138 2139 if (phba->cfg_use_msi == 2) { 2140 error = lpfc_enable_msix(phba); 2141 if (!error) 2142 phba->intr_type = MSIX; 2143 } 2144 2145 /* Fallback to MSI if MSI-X initialization failed */ 2146 if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) { 2147 retval = pci_enable_msi(phba->pcidev); 2148 if (!retval) 2149 phba->intr_type = MSI; 2150 else 2151 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2152 "0452 Enable MSI failed, continuing " 2153 "with IRQ\n"); 2154 } 2155 2156 /* MSI-X is the only case the doesn't need to call request_irq */ 2157 if (phba->intr_type != MSIX) { 2158 retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, 2159 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 2160 if (retval) { 2161 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0451 Enable " 2162 "interrupt handler failed\n"); 2163 error = retval; 2164 goto out_disable_msi; 2165 } else if (phba->intr_type != MSI) 2166 phba->intr_type = INTx; 2167 } 2168 2169 phba->MBslimaddr = phba->slim_memmap_p; 2170 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 2171 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 2172 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 2173 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 2174 2175 if (lpfc_alloc_sysfs_attr(vport)) { 2176 error = -ENOMEM; 2177 goto out_free_irq; 2178 } 2179 2180 if (lpfc_sli_hba_setup(phba)) { 2181 error = -ENODEV; 2182 goto out_remove_device; 2183 } 2184 2185 /* 2186 * hba setup may have changed the hba_queue_depth so we need to adjust 2187 * the value of can_queue. 2188 */ 2189 shost->can_queue = phba->cfg_hba_queue_depth - 10; 2190 2191 lpfc_host_attrib_init(shost); 2192 2193 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 2194 spin_lock_irq(shost->host_lock); 2195 lpfc_poll_start_timer(phba); 2196 spin_unlock_irq(shost->host_lock); 2197 } 2198 2199 scsi_scan_host(shost); 2200 2201 return 0; 2202 2203 out_remove_device: 2204 lpfc_free_sysfs_attr(vport); 2205 spin_lock_irq(shost->host_lock); 2206 vport->load_flag |= FC_UNLOADING; 2207 spin_unlock_irq(shost->host_lock); 2208 out_free_irq: 2209 lpfc_stop_phba_timers(phba); 2210 phba->pport->work_port_events = 0; 2211 2212 if (phba->intr_type == MSIX) 2213 lpfc_disable_msix(phba); 2214 else 2215 free_irq(phba->pcidev->irq, phba); 2216 2217 out_disable_msi: 2218 if (phba->intr_type == MSI) 2219 pci_disable_msi(phba->pcidev); 2220 destroy_port(vport); 2221 out_kthread_stop: 2222 kthread_stop(phba->worker_thread); 2223 out_free_iocbq: 2224 list_for_each_entry_safe(iocbq_entry, iocbq_next, 2225 &phba->lpfc_iocb_list, list) { 2226 kfree(iocbq_entry); 2227 phba->total_iocbq_bufs--; 2228 } 2229 lpfc_mem_free(phba); 2230 out_free_hbqslimp: 2231 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt, 2232 phba->hbqslimp.phys); 2233 out_free_slim: 2234 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p, 2235 phba->slim2p_mapping); 2236 out_iounmap: 2237 iounmap(phba->ctrl_regs_memmap_p); 2238 out_iounmap_slim: 2239 iounmap(phba->slim_memmap_p); 2240 out_idr_remove: 2241 idr_remove(&lpfc_hba_index, phba->brd_no); 2242 out_free_phba: 2243 kfree(phba); 2244 out_release_regions: 2245 pci_release_selected_regions(pdev, bars); 2246 out_disable_device: 2247 pci_disable_device(pdev); 2248 out: 2249 pci_set_drvdata(pdev, NULL); 2250 if (shost) 2251 scsi_host_put(shost); 2252 return error; 2253 } 2254 2255 static void __devexit 2256 lpfc_pci_remove_one(struct pci_dev *pdev) 2257 { 2258 struct Scsi_Host *shost = pci_get_drvdata(pdev); 2259 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2260 struct lpfc_hba *phba = vport->phba; 2261 int bars = pci_select_bars(pdev, IORESOURCE_MEM); 2262 2263 spin_lock_irq(&phba->hbalock); 2264 vport->load_flag |= FC_UNLOADING; 2265 spin_unlock_irq(&phba->hbalock); 2266 2267 kfree(vport->vname); 2268 lpfc_free_sysfs_attr(vport); 2269 2270 kthread_stop(phba->worker_thread); 2271 2272 fc_remove_host(shost); 2273 scsi_remove_host(shost); 2274 lpfc_cleanup(vport); 2275 2276 /* 2277 * Bring down the SLI Layer. This step disable all interrupts, 2278 * clears the rings, discards all mailbox commands, and resets 2279 * the HBA. 2280 */ 2281 lpfc_sli_hba_down(phba); 2282 lpfc_sli_brdrestart(phba); 2283 2284 lpfc_stop_phba_timers(phba); 2285 spin_lock_irq(&phba->hbalock); 2286 list_del_init(&vport->listentry); 2287 spin_unlock_irq(&phba->hbalock); 2288 2289 lpfc_debugfs_terminate(vport); 2290 2291 if (phba->intr_type == MSIX) 2292 lpfc_disable_msix(phba); 2293 else { 2294 free_irq(phba->pcidev->irq, phba); 2295 if (phba->intr_type == MSI) 2296 pci_disable_msi(phba->pcidev); 2297 } 2298 2299 pci_set_drvdata(pdev, NULL); 2300 scsi_host_put(shost); 2301 2302 /* 2303 * Call scsi_free before mem_free since scsi bufs are released to their 2304 * corresponding pools here. 2305 */ 2306 lpfc_scsi_free(phba); 2307 lpfc_mem_free(phba); 2308 2309 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt, 2310 phba->hbqslimp.phys); 2311 2312 /* Free resources associated with SLI2 interface */ 2313 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 2314 phba->slim2p, phba->slim2p_mapping); 2315 2316 /* unmap adapter SLIM and Control Registers */ 2317 iounmap(phba->ctrl_regs_memmap_p); 2318 iounmap(phba->slim_memmap_p); 2319 2320 idr_remove(&lpfc_hba_index, phba->brd_no); 2321 2322 kfree(phba); 2323 2324 pci_release_selected_regions(pdev, bars); 2325 pci_disable_device(pdev); 2326 } 2327 2328 /** 2329 * lpfc_io_error_detected - called when PCI error is detected 2330 * @pdev: Pointer to PCI device 2331 * @state: The current pci conneection state 2332 * 2333 * This function is called after a PCI bus error affecting 2334 * this device has been detected. 2335 */ 2336 static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev, 2337 pci_channel_state_t state) 2338 { 2339 struct Scsi_Host *shost = pci_get_drvdata(pdev); 2340 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 2341 struct lpfc_sli *psli = &phba->sli; 2342 struct lpfc_sli_ring *pring; 2343 2344 if (state == pci_channel_io_perm_failure) 2345 return PCI_ERS_RESULT_DISCONNECT; 2346 2347 pci_disable_device(pdev); 2348 /* 2349 * There may be I/Os dropped by the firmware. 2350 * Error iocb (I/O) on txcmplq and let the SCSI layer 2351 * retry it after re-establishing link. 2352 */ 2353 pring = &psli->ring[psli->fcp_ring]; 2354 lpfc_sli_abort_iocb_ring(phba, pring); 2355 2356 if (phba->intr_type == MSIX) 2357 lpfc_disable_msix(phba); 2358 else { 2359 free_irq(phba->pcidev->irq, phba); 2360 if (phba->intr_type == MSI) 2361 pci_disable_msi(phba->pcidev); 2362 } 2363 2364 /* Request a slot reset. */ 2365 return PCI_ERS_RESULT_NEED_RESET; 2366 } 2367 2368 /** 2369 * lpfc_io_slot_reset - called after the pci bus has been reset. 2370 * @pdev: Pointer to PCI device 2371 * 2372 * Restart the card from scratch, as if from a cold-boot. 2373 */ 2374 static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) 2375 { 2376 struct Scsi_Host *shost = pci_get_drvdata(pdev); 2377 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 2378 struct lpfc_sli *psli = &phba->sli; 2379 int error, retval; 2380 2381 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 2382 if (pci_enable_device_mem(pdev)) { 2383 printk(KERN_ERR "lpfc: Cannot re-enable " 2384 "PCI device after reset.\n"); 2385 return PCI_ERS_RESULT_DISCONNECT; 2386 } 2387 2388 pci_set_master(pdev); 2389 2390 spin_lock_irq(&phba->hbalock); 2391 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 2392 spin_unlock_irq(&phba->hbalock); 2393 2394 /* Enable configured interrupt method */ 2395 phba->intr_type = NONE; 2396 if (phba->cfg_use_msi == 2) { 2397 error = lpfc_enable_msix(phba); 2398 if (!error) 2399 phba->intr_type = MSIX; 2400 } 2401 2402 /* Fallback to MSI if MSI-X initialization failed */ 2403 if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) { 2404 retval = pci_enable_msi(phba->pcidev); 2405 if (!retval) 2406 phba->intr_type = MSI; 2407 else 2408 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2409 "0470 Enable MSI failed, continuing " 2410 "with IRQ\n"); 2411 } 2412 2413 /* MSI-X is the only case the doesn't need to call request_irq */ 2414 if (phba->intr_type != MSIX) { 2415 retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, 2416 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 2417 if (retval) { 2418 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2419 "0471 Enable interrupt handler " 2420 "failed\n"); 2421 } else if (phba->intr_type != MSI) 2422 phba->intr_type = INTx; 2423 } 2424 2425 /* Take device offline; this will perform cleanup */ 2426 lpfc_offline(phba); 2427 lpfc_sli_brdrestart(phba); 2428 2429 return PCI_ERS_RESULT_RECOVERED; 2430 } 2431 2432 /** 2433 * lpfc_io_resume - called when traffic can start flowing again. 2434 * @pdev: Pointer to PCI device 2435 * 2436 * This callback is called when the error recovery driver tells us that 2437 * its OK to resume normal operation. 2438 */ 2439 static void lpfc_io_resume(struct pci_dev *pdev) 2440 { 2441 struct Scsi_Host *shost = pci_get_drvdata(pdev); 2442 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 2443 2444 lpfc_online(phba); 2445 } 2446 2447 static struct pci_device_id lpfc_id_table[] = { 2448 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, 2449 PCI_ANY_ID, PCI_ANY_ID, }, 2450 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY, 2451 PCI_ANY_ID, PCI_ANY_ID, }, 2452 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR, 2453 PCI_ANY_ID, PCI_ANY_ID, }, 2454 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS, 2455 PCI_ANY_ID, PCI_ANY_ID, }, 2456 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR, 2457 PCI_ANY_ID, PCI_ANY_ID, }, 2458 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY, 2459 PCI_ANY_ID, PCI_ANY_ID, }, 2460 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY, 2461 PCI_ANY_ID, PCI_ANY_ID, }, 2462 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY, 2463 PCI_ANY_ID, PCI_ANY_ID, }, 2464 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY, 2465 PCI_ANY_ID, PCI_ANY_ID, }, 2466 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE, 2467 PCI_ANY_ID, PCI_ANY_ID, }, 2468 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP, 2469 PCI_ANY_ID, PCI_ANY_ID, }, 2470 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP, 2471 PCI_ANY_ID, PCI_ANY_ID, }, 2472 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS, 2473 PCI_ANY_ID, PCI_ANY_ID, }, 2474 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP, 2475 PCI_ANY_ID, PCI_ANY_ID, }, 2476 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP, 2477 PCI_ANY_ID, PCI_ANY_ID, }, 2478 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID, 2479 PCI_ANY_ID, PCI_ANY_ID, }, 2480 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB, 2481 PCI_ANY_ID, PCI_ANY_ID, }, 2482 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR, 2483 PCI_ANY_ID, PCI_ANY_ID, }, 2484 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP, 2485 PCI_ANY_ID, PCI_ANY_ID, }, 2486 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP, 2487 PCI_ANY_ID, PCI_ANY_ID, }, 2488 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID, 2489 PCI_ANY_ID, PCI_ANY_ID, }, 2490 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB, 2491 PCI_ANY_ID, PCI_ANY_ID, }, 2492 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY, 2493 PCI_ANY_ID, PCI_ANY_ID, }, 2494 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101, 2495 PCI_ANY_ID, PCI_ANY_ID, }, 2496 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S, 2497 PCI_ANY_ID, PCI_ANY_ID, }, 2498 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S, 2499 PCI_ANY_ID, PCI_ANY_ID, }, 2500 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S, 2501 PCI_ANY_ID, PCI_ANY_ID, }, 2502 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT, 2503 PCI_ANY_ID, PCI_ANY_ID, }, 2504 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID, 2505 PCI_ANY_ID, PCI_ANY_ID, }, 2506 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB, 2507 PCI_ANY_ID, PCI_ANY_ID, }, 2508 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP, 2509 PCI_ANY_ID, PCI_ANY_ID, }, 2510 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP, 2511 PCI_ANY_ID, PCI_ANY_ID, }, 2512 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S, 2513 PCI_ANY_ID, PCI_ANY_ID, }, 2514 { 0 } 2515 }; 2516 2517 MODULE_DEVICE_TABLE(pci, lpfc_id_table); 2518 2519 static struct pci_error_handlers lpfc_err_handler = { 2520 .error_detected = lpfc_io_error_detected, 2521 .slot_reset = lpfc_io_slot_reset, 2522 .resume = lpfc_io_resume, 2523 }; 2524 2525 static struct pci_driver lpfc_driver = { 2526 .name = LPFC_DRIVER_NAME, 2527 .id_table = lpfc_id_table, 2528 .probe = lpfc_pci_probe_one, 2529 .remove = __devexit_p(lpfc_pci_remove_one), 2530 .err_handler = &lpfc_err_handler, 2531 }; 2532 2533 static int __init 2534 lpfc_init(void) 2535 { 2536 int error = 0; 2537 2538 printk(LPFC_MODULE_DESC "\n"); 2539 printk(LPFC_COPYRIGHT "\n"); 2540 2541 if (lpfc_enable_npiv) { 2542 lpfc_transport_functions.vport_create = lpfc_vport_create; 2543 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 2544 } 2545 lpfc_transport_template = 2546 fc_attach_transport(&lpfc_transport_functions); 2547 if (lpfc_transport_template == NULL) 2548 return -ENOMEM; 2549 if (lpfc_enable_npiv) { 2550 lpfc_vport_transport_template = 2551 fc_attach_transport(&lpfc_vport_transport_functions); 2552 if (lpfc_vport_transport_template == NULL) { 2553 fc_release_transport(lpfc_transport_template); 2554 return -ENOMEM; 2555 } 2556 } 2557 error = pci_register_driver(&lpfc_driver); 2558 if (error) { 2559 fc_release_transport(lpfc_transport_template); 2560 fc_release_transport(lpfc_vport_transport_template); 2561 } 2562 2563 return error; 2564 } 2565 2566 static void __exit 2567 lpfc_exit(void) 2568 { 2569 pci_unregister_driver(&lpfc_driver); 2570 fc_release_transport(lpfc_transport_template); 2571 if (lpfc_enable_npiv) 2572 fc_release_transport(lpfc_vport_transport_template); 2573 } 2574 2575 module_init(lpfc_init); 2576 module_exit(lpfc_exit); 2577 MODULE_LICENSE("GPL"); 2578 MODULE_DESCRIPTION(LPFC_MODULE_DESC); 2579 MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com"); 2580 MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 2581