/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2006 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_version.h"

static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

/************************************************************************/
/*                                                                      */
/*    lpfc_config_port_prep                                             */
/*    This routine will do LPFC initialization prior to the             */
/*    CONFIG_PORT mailbox command. This will be initialized             */
/*    as a SLI layer callback routine.                                  */
/*    This routine returns 0 on success or -ERESTART if it wants        */
/*    the SLI layer to reset the HBA and try again. Any                 */
/*    other return value indicates an error.                            */
/*                                                                      */
/************************************************************************/
int
lpfc_config_port_prep(struct lpfc_hba * phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->hba_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->mb;
	phba->hba_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		uint32_t *ptext = (uint32_t *) licensed;

		for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
			*ptext = cpu_to_be32(*ptext);

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_MBOX,
					"%d:0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					phba->brd_no,
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof (mb->un.varRDnvp.nodename));
	}

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"%d:0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				phba->brd_no,
				mb->mbxCommand, mb->mbxStatus);
		mempool_free( pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"%d:0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n",
				phba->brd_no);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));

	/* Get adapter VPD information */
	pmb->context2 = kmalloc(DMP_RSP_SIZE, GFP_KERNEL);
	if (!pmb->context2)
		goto out_free_mbox;
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_context2;

	do {
		lpfc_dump_mem(phba, pmb, offset);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"%d:0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					phba->brd_no,
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_context2:
	kfree(pmb->context2);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/************************************************************************/
/*                                                                      */
/*    lpfc_config_port_post                                             */
/*    This routine will do LPFC initialization after the                */
/*    CONFIG_PORT mailbox command. This will be initialized             */
/*    as a SLI layer callback routine.                                  */
/*    This routine returns 0 on success. Any other return value         */
/*    indicates an error.                                               */
/*                                                                      */
/************************************************************************/
int
lpfc_config_port_post(struct lpfc_hba * phba)
{
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j, rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->hba_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->mb;

	lpfc_config_link(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"%d:0447 Adapter failed init, mbxCmd x%x "
				"CONFIG_LINK mbxStatus x%x\n",
				phba->brd_no,
				mb->mbxCommand, mb->mbxStatus);
		phba->hba_state = LPFC_HBA_ERROR;
		mempool_free( pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Get login parameters for NID. */
	lpfc_read_sparam(phba, pmb);
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"%d:0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				phba->brd_no,
				mb->mbxCommand, mb->mbxStatus);
		phba->hba_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free( pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&phba->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;

	memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy(&phba->fc_portname, &phba->fc_sparam.portName,
	       sizeof (struct lpfc_name));
	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ?
- mr */ 273 if (phba->SerialNumber[0] == 0) { 274 uint8_t *outptr; 275 276 outptr = &phba->fc_nodename.u.s.IEEE[0]; 277 for (i = 0; i < 12; i++) { 278 status = *outptr++; 279 j = ((status & 0xf0) >> 4); 280 if (j <= 9) 281 phba->SerialNumber[i] = 282 (char)((uint8_t) 0x30 + (uint8_t) j); 283 else 284 phba->SerialNumber[i] = 285 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 286 i++; 287 j = (status & 0xf); 288 if (j <= 9) 289 phba->SerialNumber[i] = 290 (char)((uint8_t) 0x30 + (uint8_t) j); 291 else 292 phba->SerialNumber[i] = 293 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 294 } 295 } 296 297 lpfc_read_config(phba, pmb); 298 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 299 lpfc_printf_log(phba, 300 KERN_ERR, 301 LOG_INIT, 302 "%d:0453 Adapter failed to init, mbxCmd x%x " 303 "READ_CONFIG, mbxStatus x%x\n", 304 phba->brd_no, 305 mb->mbxCommand, mb->mbxStatus); 306 phba->hba_state = LPFC_HBA_ERROR; 307 mempool_free( pmb, phba->mbox_mem_pool); 308 return -EIO; 309 } 310 311 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 312 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1)) 313 phba->cfg_hba_queue_depth = 314 mb->un.varRdConfig.max_xri + 1; 315 316 phba->lmt = mb->un.varRdConfig.lmt; 317 318 /* Get the default values for Model Name and Description */ 319 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 320 321 if ((phba->cfg_link_speed > LINK_SPEED_10G) 322 || ((phba->cfg_link_speed == LINK_SPEED_1G) 323 && !(phba->lmt & LMT_1Gb)) 324 || ((phba->cfg_link_speed == LINK_SPEED_2G) 325 && !(phba->lmt & LMT_2Gb)) 326 || ((phba->cfg_link_speed == LINK_SPEED_4G) 327 && !(phba->lmt & LMT_4Gb)) 328 || ((phba->cfg_link_speed == LINK_SPEED_8G) 329 && !(phba->lmt & LMT_8Gb)) 330 || ((phba->cfg_link_speed == LINK_SPEED_10G) 331 && !(phba->lmt & LMT_10Gb))) { 332 /* Reset link speed to auto */ 333 lpfc_printf_log(phba, 334 KERN_WARNING, 335 LOG_LINK_EVENT, 336 "%d:1302 Invalid speed for this board: " 337 "Reset link speed to auto: x%x\n", 338 phba->brd_no, 339 phba->cfg_link_speed); 340 phba->cfg_link_speed = LINK_SPEED_AUTO; 341 } 342 343 phba->hba_state = LPFC_LINK_DOWN; 344 345 /* Only process IOCBs on ring 0 till hba_state is READY */ 346 if (psli->ring[psli->ip_ring].cmdringaddr) 347 psli->ring[psli->ip_ring].flag |= LPFC_STOP_IOCB_EVENT; 348 if (psli->ring[psli->fcp_ring].cmdringaddr) 349 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT; 350 if (psli->ring[psli->next_ring].cmdringaddr) 351 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT; 352 353 /* Post receive buffers for desired rings */ 354 lpfc_post_rcv_buf(phba); 355 356 /* Enable appropriate host interrupts */ 357 spin_lock_irq(phba->host->host_lock); 358 status = readl(phba->HCregaddr); 359 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 360 if (psli->num_rings > 0) 361 status |= HC_R0INT_ENA; 362 if (psli->num_rings > 1) 363 status |= HC_R1INT_ENA; 364 if (psli->num_rings > 2) 365 status |= HC_R2INT_ENA; 366 if (psli->num_rings > 3) 367 status |= HC_R3INT_ENA; 368 369 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 370 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 371 status &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 372 373 writel(status, phba->HCregaddr); 374 readl(phba->HCregaddr); /* flush */ 375 spin_unlock_irq(phba->host->host_lock); 376 377 /* 378 * Setup the ring 0 (els) timeout handler 379 */ 380 timeout = phba->fc_ratov << 1; 381 phba->els_tmofunc.expires = jiffies + HZ * timeout; 382 add_timer(&phba->els_tmofunc); 383 384 lpfc_init_link(phba, pmb, phba->cfg_topology, 
phba->cfg_link_speed); 385 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 386 if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) != MBX_SUCCESS) { 387 lpfc_printf_log(phba, 388 KERN_ERR, 389 LOG_INIT, 390 "%d:0454 Adapter failed to init, mbxCmd x%x " 391 "INIT_LINK, mbxStatus x%x\n", 392 phba->brd_no, 393 mb->mbxCommand, mb->mbxStatus); 394 395 /* Clear all interrupt enable conditions */ 396 writel(0, phba->HCregaddr); 397 readl(phba->HCregaddr); /* flush */ 398 /* Clear all pending interrupts */ 399 writel(0xffffffff, phba->HAregaddr); 400 readl(phba->HAregaddr); /* flush */ 401 402 phba->hba_state = LPFC_HBA_ERROR; 403 mempool_free(pmb, phba->mbox_mem_pool); 404 return -EIO; 405 } 406 /* MBOX buffer will be freed in mbox compl */ 407 408 i = 0; 409 while ((phba->hba_state != LPFC_HBA_READY) || 410 (phba->num_disc_nodes) || (phba->fc_prli_sent) || 411 ((phba->fc_map_cnt == 0) && (i<2)) || 412 (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) { 413 /* Check every second for 30 retries. */ 414 i++; 415 if (i > 30) { 416 break; 417 } 418 if ((i >= 15) && (phba->hba_state <= LPFC_LINK_DOWN)) { 419 /* The link is down. Set linkdown timeout */ 420 break; 421 } 422 423 /* Delay for 1 second to give discovery time to complete. */ 424 msleep(1000); 425 426 } 427 428 /* Since num_disc_nodes keys off of PLOGI, delay a bit to let 429 * any potential PRLIs to flush thru the SLI sub-system. 430 */ 431 msleep(50); 432 433 return (0); 434 } 435 436 /************************************************************************/ 437 /* */ 438 /* lpfc_hba_down_prep */ 439 /* This routine will do LPFC uninitialization before the */ 440 /* HBA is reset when bringing down the SLI Layer. This will be */ 441 /* initialized as a SLI layer callback routine. */ 442 /* This routine returns 0 on success. Any other return value */ 443 /* indicates an error. */ 444 /* */ 445 /************************************************************************/ 446 int 447 lpfc_hba_down_prep(struct lpfc_hba * phba) 448 { 449 /* Disable interrupts */ 450 writel(0, phba->HCregaddr); 451 readl(phba->HCregaddr); /* flush */ 452 453 /* Cleanup potential discovery resources */ 454 lpfc_els_flush_rscn(phba); 455 lpfc_els_flush_cmd(phba); 456 lpfc_disc_flush_list(phba); 457 458 return (0); 459 } 460 461 /************************************************************************/ 462 /* */ 463 /* lpfc_hba_down_post */ 464 /* This routine will do uninitialization after the HBA is reset */ 465 /* when bringing down the SLI Layer. */ 466 /* This routine returns 0 on success. Any other return value */ 467 /* indicates an error. */ 468 /* */ 469 /************************************************************************/ 470 int 471 lpfc_hba_down_post(struct lpfc_hba * phba) 472 { 473 struct lpfc_sli *psli = &phba->sli; 474 struct lpfc_sli_ring *pring; 475 struct lpfc_dmabuf *mp, *next_mp; 476 int i; 477 478 /* Cleanup preposted buffers on the ELS ring */ 479 pring = &psli->ring[LPFC_ELS_RING]; 480 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 481 list_del(&mp->list); 482 pring->postbufq_cnt--; 483 lpfc_mbuf_free(phba, mp->virt, mp->phys); 484 kfree(mp); 485 } 486 487 for (i = 0; i < psli->num_rings; i++) { 488 pring = &psli->ring[i]; 489 lpfc_sli_abort_iocb_ring(phba, pring); 490 } 491 492 return 0; 493 } 494 495 /************************************************************************/ 496 /* */ 497 /* lpfc_handle_eratt */ 498 /* This routine will handle processing a Host Attention */ 499 /* Error Status event. 
This will be initialized                          */
/*    as a SLI layer callback routine.                                  */
/*                                                                      */
/************************************************************************/
void
lpfc_handle_eratt(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	if (phba->work_hs & HS_FFER6) {
		/* Re-establishing Link */
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"%d:1301 Re-establishing Link "
				"Data: x%x x%x x%x\n",
				phba->brd_no, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag |= FC_ESTABLISH_LINK;
		psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
		spin_unlock_irq(phba->host->host_lock);

		/*
		 * Firmware stops when it has triggered an error attention
		 * with HS_FFER6.  I/Os outstanding in the firmware may be
		 * dropped, so abort the IOCBs on the txcmplq and let the
		 * SCSI layer retry them after the link is re-established.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);


		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
			return;
		}
	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6.  Do not call the
		 * offline path twice; this is the adapter hardware error
		 * path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"%d:0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->brd_no, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
		lpfc_offline(phba);
		phba->hba_state = LPFC_HBA_ERROR;
		lpfc_hba_down_post(phba);
	}
}

/************************************************************************/
/*                                                                      */
/*    lpfc_handle_latt                                                  */
/*    This routine will handle processing a Host Attention              */
/*    Link Status event. This will be initialized                       */
/*    as a SLI layer callback routine.
*/ 565 /* */ 566 /************************************************************************/ 567 void 568 lpfc_handle_latt(struct lpfc_hba * phba) 569 { 570 struct lpfc_sli *psli = &phba->sli; 571 LPFC_MBOXQ_t *pmb; 572 volatile uint32_t control; 573 struct lpfc_dmabuf *mp; 574 int rc = -ENOMEM; 575 576 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 577 if (!pmb) 578 goto lpfc_handle_latt_err_exit; 579 580 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 581 if (!mp) 582 goto lpfc_handle_latt_free_pmb; 583 584 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 585 if (!mp->virt) 586 goto lpfc_handle_latt_free_mp; 587 588 rc = -EIO; 589 590 /* Cleanup any outstanding ELS commands */ 591 lpfc_els_flush_cmd(phba); 592 593 psli->slistat.link_event++; 594 lpfc_read_la(phba, pmb, mp); 595 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la; 596 rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB)); 597 if (rc == MBX_NOT_FINISHED) 598 goto lpfc_handle_latt_free_mp; 599 600 /* Clear Link Attention in HA REG */ 601 spin_lock_irq(phba->host->host_lock); 602 writel(HA_LATT, phba->HAregaddr); 603 readl(phba->HAregaddr); /* flush */ 604 spin_unlock_irq(phba->host->host_lock); 605 606 return; 607 608 lpfc_handle_latt_free_mp: 609 kfree(mp); 610 lpfc_handle_latt_free_pmb: 611 kfree(pmb); 612 lpfc_handle_latt_err_exit: 613 /* Enable Link attention interrupts */ 614 spin_lock_irq(phba->host->host_lock); 615 psli->sli_flag |= LPFC_PROCESS_LA; 616 control = readl(phba->HCregaddr); 617 control |= HC_LAINT_ENA; 618 writel(control, phba->HCregaddr); 619 readl(phba->HCregaddr); /* flush */ 620 621 /* Clear Link Attention in HA REG */ 622 writel(HA_LATT, phba->HAregaddr); 623 readl(phba->HAregaddr); /* flush */ 624 spin_unlock_irq(phba->host->host_lock); 625 lpfc_linkdown(phba); 626 phba->hba_state = LPFC_HBA_ERROR; 627 628 /* The other case is an error from issue_mbox */ 629 if (rc == -ENOMEM) 630 lpfc_printf_log(phba, 631 KERN_WARNING, 632 LOG_MBOX, 633 "%d:0300 READ_LA: no buffers\n", 634 phba->brd_no); 635 636 return; 637 } 638 639 /************************************************************************/ 640 /* */ 641 /* lpfc_parse_vpd */ 642 /* This routine will parse the VPD data */ 643 /* */ 644 /************************************************************************/ 645 static int 646 lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd, int len) 647 { 648 uint8_t lenlo, lenhi; 649 uint32_t Length; 650 int i, j; 651 int finished = 0; 652 int index = 0; 653 654 if (!vpd) 655 return 0; 656 657 /* Vital Product */ 658 lpfc_printf_log(phba, 659 KERN_INFO, 660 LOG_INIT, 661 "%d:0455 Vital Product Data: x%x x%x x%x x%x\n", 662 phba->brd_no, 663 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 664 (uint32_t) vpd[3]); 665 while (!finished && (index < (len - 4))) { 666 switch (vpd[index]) { 667 case 0x82: 668 case 0x91: 669 index += 1; 670 lenlo = vpd[index]; 671 index += 1; 672 lenhi = vpd[index]; 673 index += 1; 674 i = ((((unsigned short)lenhi) << 8) + lenlo); 675 index += i; 676 break; 677 case 0x90: 678 index += 1; 679 lenlo = vpd[index]; 680 index += 1; 681 lenhi = vpd[index]; 682 index += 1; 683 Length = ((((unsigned short)lenhi) << 8) + lenlo); 684 if (Length > len - index) 685 Length = len - index; 686 while (Length > 0) { 687 /* Look for Serial Number */ 688 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 689 index += 2; 690 i = vpd[index]; 691 index += 1; 692 j = 0; 693 Length -= (3+i); 694 while(i--) { 695 phba->SerialNumber[j++] = vpd[index++]; 696 if (j == 31) 
697 break; 698 } 699 phba->SerialNumber[j] = 0; 700 continue; 701 } 702 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 703 phba->vpd_flag |= VPD_MODEL_DESC; 704 index += 2; 705 i = vpd[index]; 706 index += 1; 707 j = 0; 708 Length -= (3+i); 709 while(i--) { 710 phba->ModelDesc[j++] = vpd[index++]; 711 if (j == 255) 712 break; 713 } 714 phba->ModelDesc[j] = 0; 715 continue; 716 } 717 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 718 phba->vpd_flag |= VPD_MODEL_NAME; 719 index += 2; 720 i = vpd[index]; 721 index += 1; 722 j = 0; 723 Length -= (3+i); 724 while(i--) { 725 phba->ModelName[j++] = vpd[index++]; 726 if (j == 79) 727 break; 728 } 729 phba->ModelName[j] = 0; 730 continue; 731 } 732 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 733 phba->vpd_flag |= VPD_PROGRAM_TYPE; 734 index += 2; 735 i = vpd[index]; 736 index += 1; 737 j = 0; 738 Length -= (3+i); 739 while(i--) { 740 phba->ProgramType[j++] = vpd[index++]; 741 if (j == 255) 742 break; 743 } 744 phba->ProgramType[j] = 0; 745 continue; 746 } 747 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 748 phba->vpd_flag |= VPD_PORT; 749 index += 2; 750 i = vpd[index]; 751 index += 1; 752 j = 0; 753 Length -= (3+i); 754 while(i--) { 755 phba->Port[j++] = vpd[index++]; 756 if (j == 19) 757 break; 758 } 759 phba->Port[j] = 0; 760 continue; 761 } 762 else { 763 index += 2; 764 i = vpd[index]; 765 index += 1; 766 index += i; 767 Length -= (3 + i); 768 } 769 } 770 finished = 0; 771 break; 772 case 0x78: 773 finished = 1; 774 break; 775 default: 776 index ++; 777 break; 778 } 779 } 780 781 return(1); 782 } 783 784 static void 785 lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp) 786 { 787 lpfc_vpd_t *vp; 788 uint16_t dev_id = phba->pcidev->device; 789 uint16_t dev_subid = phba->pcidev->subsystem_device; 790 uint8_t hdrtype; 791 int max_speed; 792 char * ports; 793 struct { 794 char * name; 795 int max_speed; 796 char * ports; 797 char * bus; 798 } m = {"<Unknown>", 0, "", ""}; 799 800 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype); 801 ports = (hdrtype == 0x80) ? 
"2-port " : ""; 802 if (mdp && mdp[0] != '\0' 803 && descp && descp[0] != '\0') 804 return; 805 806 if (phba->lmt & LMT_10Gb) 807 max_speed = 10; 808 else if (phba->lmt & LMT_8Gb) 809 max_speed = 8; 810 else if (phba->lmt & LMT_4Gb) 811 max_speed = 4; 812 else if (phba->lmt & LMT_2Gb) 813 max_speed = 2; 814 else 815 max_speed = 1; 816 817 vp = &phba->vpd; 818 819 switch (dev_id) { 820 case PCI_DEVICE_ID_FIREFLY: 821 m = (typeof(m)){"LP6000", max_speed, "", "PCI"}; 822 break; 823 case PCI_DEVICE_ID_SUPERFLY: 824 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 825 m = (typeof(m)){"LP7000", max_speed, "", "PCI"}; 826 else 827 m = (typeof(m)){"LP7000E", max_speed, "", "PCI"}; 828 break; 829 case PCI_DEVICE_ID_DRAGONFLY: 830 m = (typeof(m)){"LP8000", max_speed, "", "PCI"}; 831 break; 832 case PCI_DEVICE_ID_CENTAUR: 833 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 834 m = (typeof(m)){"LP9002", max_speed, "", "PCI"}; 835 else 836 m = (typeof(m)){"LP9000", max_speed, "", "PCI"}; 837 break; 838 case PCI_DEVICE_ID_RFLY: 839 m = (typeof(m)){"LP952", max_speed, "", "PCI"}; 840 break; 841 case PCI_DEVICE_ID_PEGASUS: 842 m = (typeof(m)){"LP9802", max_speed, "", "PCI-X"}; 843 break; 844 case PCI_DEVICE_ID_THOR: 845 if (hdrtype == 0x80) 846 m = (typeof(m)){"LP10000DC", 847 max_speed, ports, "PCI-X"}; 848 else 849 m = (typeof(m)){"LP10000", 850 max_speed, ports, "PCI-X"}; 851 break; 852 case PCI_DEVICE_ID_VIPER: 853 m = (typeof(m)){"LPX1000", max_speed, "", "PCI-X"}; 854 break; 855 case PCI_DEVICE_ID_PFLY: 856 m = (typeof(m)){"LP982", max_speed, "", "PCI-X"}; 857 break; 858 case PCI_DEVICE_ID_TFLY: 859 if (hdrtype == 0x80) 860 m = (typeof(m)){"LP1050DC", max_speed, ports, "PCI-X"}; 861 else 862 m = (typeof(m)){"LP1050", max_speed, ports, "PCI-X"}; 863 break; 864 case PCI_DEVICE_ID_HELIOS: 865 if (hdrtype == 0x80) 866 m = (typeof(m)){"LP11002", max_speed, ports, "PCI-X2"}; 867 else 868 m = (typeof(m)){"LP11000", max_speed, ports, "PCI-X2"}; 869 break; 870 case PCI_DEVICE_ID_HELIOS_SCSP: 871 m = (typeof(m)){"LP11000-SP", max_speed, ports, "PCI-X2"}; 872 break; 873 case PCI_DEVICE_ID_HELIOS_DCSP: 874 m = (typeof(m)){"LP11002-SP", max_speed, ports, "PCI-X2"}; 875 break; 876 case PCI_DEVICE_ID_NEPTUNE: 877 if (hdrtype == 0x80) 878 m = (typeof(m)){"LPe1002", max_speed, ports, "PCIe"}; 879 else 880 m = (typeof(m)){"LPe1000", max_speed, ports, "PCIe"}; 881 break; 882 case PCI_DEVICE_ID_NEPTUNE_SCSP: 883 m = (typeof(m)){"LPe1000-SP", max_speed, ports, "PCIe"}; 884 break; 885 case PCI_DEVICE_ID_NEPTUNE_DCSP: 886 m = (typeof(m)){"LPe1002-SP", max_speed, ports, "PCIe"}; 887 break; 888 case PCI_DEVICE_ID_BMID: 889 m = (typeof(m)){"LP1150", max_speed, ports, "PCI-X2"}; 890 break; 891 case PCI_DEVICE_ID_BSMB: 892 m = (typeof(m)){"LP111", max_speed, ports, "PCI-X2"}; 893 break; 894 case PCI_DEVICE_ID_ZEPHYR: 895 if (hdrtype == 0x80) 896 m = (typeof(m)){"LPe11002", max_speed, ports, "PCIe"}; 897 else 898 m = (typeof(m)){"LPe11000", max_speed, ports, "PCIe"}; 899 break; 900 case PCI_DEVICE_ID_ZEPHYR_SCSP: 901 m = (typeof(m)){"LPe11000", max_speed, ports, "PCIe"}; 902 break; 903 case PCI_DEVICE_ID_ZEPHYR_DCSP: 904 m = (typeof(m)){"LPe11002-SP", max_speed, ports, "PCIe"}; 905 break; 906 case PCI_DEVICE_ID_ZMID: 907 m = (typeof(m)){"LPe1150", max_speed, ports, "PCIe"}; 908 break; 909 case PCI_DEVICE_ID_ZSMB: 910 m = (typeof(m)){"LPe111", max_speed, ports, "PCIe"}; 911 break; 912 case PCI_DEVICE_ID_LP101: 913 m = (typeof(m)){"LP101", max_speed, ports, "PCI-X"}; 914 break; 915 case PCI_DEVICE_ID_LP10000S: 916 m = 
(typeof(m)){"LP10000-S", max_speed, ports, "PCI"}; 917 break; 918 case PCI_DEVICE_ID_LP11000S: 919 case PCI_DEVICE_ID_LPE11000S: 920 switch (dev_subid) { 921 case PCI_SUBSYSTEM_ID_LP11000S: 922 m = (typeof(m)){"LP11000-S", max_speed, 923 ports, "PCI-X2"}; 924 break; 925 case PCI_SUBSYSTEM_ID_LP11002S: 926 m = (typeof(m)){"LP11002-S", max_speed, 927 ports, "PCI-X2"}; 928 break; 929 case PCI_SUBSYSTEM_ID_LPE11000S: 930 m = (typeof(m)){"LPe11000-S", max_speed, 931 ports, "PCIe"}; 932 break; 933 case PCI_SUBSYSTEM_ID_LPE11002S: 934 m = (typeof(m)){"LPe11002-S", max_speed, 935 ports, "PCIe"}; 936 break; 937 case PCI_SUBSYSTEM_ID_LPE11010S: 938 m = (typeof(m)){"LPe11010-S", max_speed, 939 "10-port ", "PCIe"}; 940 break; 941 default: 942 m = (typeof(m)){ NULL }; 943 break; 944 } 945 break; 946 default: 947 m = (typeof(m)){ NULL }; 948 break; 949 } 950 951 if (mdp && mdp[0] == '\0') 952 snprintf(mdp, 79,"%s", m.name); 953 if (descp && descp[0] == '\0') 954 snprintf(descp, 255, 955 "Emulex %s %dGb %s%s Fibre Channel Adapter", 956 m.name, m.max_speed, m.ports, m.bus); 957 } 958 959 /**************************************************/ 960 /* lpfc_post_buffer */ 961 /* */ 962 /* This routine will post count buffers to the */ 963 /* ring with the QUE_RING_BUF_CN command. This */ 964 /* allows 3 buffers / command to be posted. */ 965 /* Returns the number of buffers NOT posted. */ 966 /**************************************************/ 967 int 968 lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt, 969 int type) 970 { 971 IOCB_t *icmd; 972 struct lpfc_iocbq *iocb; 973 struct lpfc_dmabuf *mp1, *mp2; 974 975 cnt += pring->missbufcnt; 976 977 /* While there are buffers to post */ 978 while (cnt > 0) { 979 /* Allocate buffer for command iocb */ 980 spin_lock_irq(phba->host->host_lock); 981 iocb = lpfc_sli_get_iocbq(phba); 982 spin_unlock_irq(phba->host->host_lock); 983 if (iocb == NULL) { 984 pring->missbufcnt = cnt; 985 return cnt; 986 } 987 icmd = &iocb->iocb; 988 989 /* 2 buffers can be posted per command */ 990 /* Allocate buffer to post */ 991 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 992 if (mp1) 993 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 994 &mp1->phys); 995 if (mp1 == 0 || mp1->virt == 0) { 996 kfree(mp1); 997 spin_lock_irq(phba->host->host_lock); 998 lpfc_sli_release_iocbq(phba, iocb); 999 spin_unlock_irq(phba->host->host_lock); 1000 pring->missbufcnt = cnt; 1001 return cnt; 1002 } 1003 1004 INIT_LIST_HEAD(&mp1->list); 1005 /* Allocate buffer to post */ 1006 if (cnt > 1) { 1007 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 1008 if (mp2) 1009 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 1010 &mp2->phys); 1011 if (mp2 == 0 || mp2->virt == 0) { 1012 kfree(mp2); 1013 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1014 kfree(mp1); 1015 spin_lock_irq(phba->host->host_lock); 1016 lpfc_sli_release_iocbq(phba, iocb); 1017 spin_unlock_irq(phba->host->host_lock); 1018 pring->missbufcnt = cnt; 1019 return cnt; 1020 } 1021 1022 INIT_LIST_HEAD(&mp2->list); 1023 } else { 1024 mp2 = NULL; 1025 } 1026 1027 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 1028 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 1029 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 1030 icmd->ulpBdeCount = 1; 1031 cnt--; 1032 if (mp2) { 1033 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 1034 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 1035 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 1036 cnt--; 1037 icmd->ulpBdeCount = 2; 1038 } 1039 1040 icmd->ulpCommand = 
CMD_QUE_RING_BUF64_CN; 1041 icmd->ulpLe = 1; 1042 1043 spin_lock_irq(phba->host->host_lock); 1044 if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) { 1045 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1046 kfree(mp1); 1047 cnt++; 1048 if (mp2) { 1049 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 1050 kfree(mp2); 1051 cnt++; 1052 } 1053 lpfc_sli_release_iocbq(phba, iocb); 1054 pring->missbufcnt = cnt; 1055 spin_unlock_irq(phba->host->host_lock); 1056 return cnt; 1057 } 1058 spin_unlock_irq(phba->host->host_lock); 1059 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 1060 if (mp2) { 1061 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 1062 } 1063 } 1064 pring->missbufcnt = 0; 1065 return 0; 1066 } 1067 1068 /************************************************************************/ 1069 /* */ 1070 /* lpfc_post_rcv_buf */ 1071 /* This routine post initial rcv buffers to the configured rings */ 1072 /* */ 1073 /************************************************************************/ 1074 static int 1075 lpfc_post_rcv_buf(struct lpfc_hba * phba) 1076 { 1077 struct lpfc_sli *psli = &phba->sli; 1078 1079 /* Ring 0, ELS / CT buffers */ 1080 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0, 1); 1081 /* Ring 2 - FCP no buffers needed */ 1082 1083 return 0; 1084 } 1085 1086 #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 1087 1088 /************************************************************************/ 1089 /* */ 1090 /* lpfc_sha_init */ 1091 /* */ 1092 /************************************************************************/ 1093 static void 1094 lpfc_sha_init(uint32_t * HashResultPointer) 1095 { 1096 HashResultPointer[0] = 0x67452301; 1097 HashResultPointer[1] = 0xEFCDAB89; 1098 HashResultPointer[2] = 0x98BADCFE; 1099 HashResultPointer[3] = 0x10325476; 1100 HashResultPointer[4] = 0xC3D2E1F0; 1101 } 1102 1103 /************************************************************************/ 1104 /* */ 1105 /* lpfc_sha_iterate */ 1106 /* */ 1107 /************************************************************************/ 1108 static void 1109 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) 1110 { 1111 int t; 1112 uint32_t TEMP; 1113 uint32_t A, B, C, D, E; 1114 t = 16; 1115 do { 1116 HashWorkingPointer[t] = 1117 S(1, 1118 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 1119 8] ^ 1120 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]); 1121 } while (++t <= 79); 1122 t = 0; 1123 A = HashResultPointer[0]; 1124 B = HashResultPointer[1]; 1125 C = HashResultPointer[2]; 1126 D = HashResultPointer[3]; 1127 E = HashResultPointer[4]; 1128 1129 do { 1130 if (t < 20) { 1131 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999; 1132 } else if (t < 40) { 1133 TEMP = (B ^ C ^ D) + 0x6ED9EBA1; 1134 } else if (t < 60) { 1135 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC; 1136 } else { 1137 TEMP = (B ^ C ^ D) + 0xCA62C1D6; 1138 } 1139 TEMP += S(5, A) + E + HashWorkingPointer[t]; 1140 E = D; 1141 D = C; 1142 C = S(30, B); 1143 B = A; 1144 A = TEMP; 1145 } while (++t <= 79); 1146 1147 HashResultPointer[0] += A; 1148 HashResultPointer[1] += B; 1149 HashResultPointer[2] += C; 1150 HashResultPointer[3] += D; 1151 HashResultPointer[4] += E; 1152 1153 } 1154 1155 /************************************************************************/ 1156 /* */ 1157 /* lpfc_challenge_key */ 1158 /* */ 1159 /************************************************************************/ 1160 static void 1161 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) 1162 { 1163 *HashWorking = 
(*RandomChallenge ^ *HashWorking);
}

/************************************************************************/
/*                                                                      */
/*    lpfc_hba_init                                                     */
/*                                                                      */
/************************************************************************/
void
lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
{
	int t;
	uint32_t *HashWorking;
	uint32_t *pwwnn = phba->wwnn;

	HashWorking = kmalloc(80 * sizeof(uint32_t), GFP_KERNEL);
	if (!HashWorking)
		return;

	memset(HashWorking, 0, (80 * sizeof(uint32_t)));
	HashWorking[0] = HashWorking[78] = *pwwnn++;
	HashWorking[1] = HashWorking[79] = *pwwnn;

	for (t = 0; t < 7; t++)
		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);

	lpfc_sha_init(hbainit);
	lpfc_sha_iterate(hbainit, HashWorking);
	kfree(HashWorking);
}

static void
lpfc_cleanup(struct lpfc_hba * phba, uint32_t save_bind)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;

	/* clean up phba - lpfc specific */
	lpfc_can_disctmo(phba);
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpunmap_list,
				nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
				nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
				nlp_listp) {
		lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
				nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
				nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_reglogin_list,
				nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
				nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
				nlp_listp) {
		lpfc_nlp_remove(phba, ndlp);
	}

	INIT_LIST_HEAD(&phba->fc_nlpmap_list);
	INIT_LIST_HEAD(&phba->fc_nlpunmap_list);
	INIT_LIST_HEAD(&phba->fc_unused_list);
	INIT_LIST_HEAD(&phba->fc_plogi_list);
	INIT_LIST_HEAD(&phba->fc_adisc_list);
	INIT_LIST_HEAD(&phba->fc_reglogin_list);
	INIT_LIST_HEAD(&phba->fc_prli_list);
	INIT_LIST_HEAD(&phba->fc_npr_list);

	phba->fc_map_cnt = 0;
	phba->fc_unmap_cnt = 0;
	phba->fc_plogi_cnt = 0;
	phba->fc_adisc_cnt = 0;
	phba->fc_reglogin_cnt = 0;
	phba->fc_prli_cnt = 0;
	phba->fc_npr_cnt = 0;
	phba->fc_unused_cnt= 0;
	return;
}

static void
lpfc_establish_link_tmo(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
	unsigned long iflag;


	/* Re-establishing Link, timer expired */
	lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
			"%d:1300 Re-establishing Link, timer expired "
			"Data: x%x x%x\n",
			phba->brd_no, phba->fc_flag, phba->hba_state);
	spin_lock_irqsave(phba->host->host_lock, iflag);
	phba->fc_flag &= ~FC_ESTABLISH_LINK;
	spin_unlock_irqrestore(phba->host->host_lock, iflag);
}

static int
lpfc_stop_timer(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Instead of a timer, this has been converted to a
	 * deferred processing list.
1285 */ 1286 while (!list_empty(&phba->freebufList)) { 1287 1288 struct lpfc_dmabuf *mp = NULL; 1289 1290 list_remove_head((&phba->freebufList), mp, 1291 struct lpfc_dmabuf, list); 1292 if (mp) { 1293 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1294 kfree(mp); 1295 } 1296 } 1297 1298 del_timer_sync(&phba->fcp_poll_timer); 1299 del_timer_sync(&phba->fc_estabtmo); 1300 del_timer_sync(&phba->fc_disctmo); 1301 del_timer_sync(&phba->fc_fdmitmo); 1302 del_timer_sync(&phba->els_tmofunc); 1303 psli = &phba->sli; 1304 del_timer_sync(&psli->mbox_tmo); 1305 return(1); 1306 } 1307 1308 int 1309 lpfc_online(struct lpfc_hba * phba) 1310 { 1311 if (!phba) 1312 return 0; 1313 1314 if (!(phba->fc_flag & FC_OFFLINE_MODE)) 1315 return 0; 1316 1317 lpfc_printf_log(phba, 1318 KERN_WARNING, 1319 LOG_INIT, 1320 "%d:0458 Bring Adapter online\n", 1321 phba->brd_no); 1322 1323 if (!lpfc_sli_queue_setup(phba)) 1324 return 1; 1325 1326 if (lpfc_sli_hba_setup(phba)) /* Initialize the HBA */ 1327 return 1; 1328 1329 spin_lock_irq(phba->host->host_lock); 1330 phba->fc_flag &= ~FC_OFFLINE_MODE; 1331 spin_unlock_irq(phba->host->host_lock); 1332 1333 return 0; 1334 } 1335 1336 int 1337 lpfc_offline(struct lpfc_hba * phba) 1338 { 1339 struct lpfc_sli_ring *pring; 1340 struct lpfc_sli *psli; 1341 unsigned long iflag; 1342 int i; 1343 int cnt = 0; 1344 1345 if (!phba) 1346 return 0; 1347 1348 if (phba->fc_flag & FC_OFFLINE_MODE) 1349 return 0; 1350 1351 psli = &phba->sli; 1352 1353 lpfc_linkdown(phba); 1354 1355 for (i = 0; i < psli->num_rings; i++) { 1356 pring = &psli->ring[i]; 1357 /* The linkdown event takes 30 seconds to timeout. */ 1358 while (pring->txcmplq_cnt) { 1359 mdelay(10); 1360 if (cnt++ > 3000) { 1361 lpfc_printf_log(phba, 1362 KERN_WARNING, LOG_INIT, 1363 "%d:0466 Outstanding IO when " 1364 "bringing Adapter offline\n", 1365 phba->brd_no); 1366 break; 1367 } 1368 } 1369 } 1370 1371 1372 /* stop all timers associated with this hba */ 1373 lpfc_stop_timer(phba); 1374 phba->work_hba_events = 0; 1375 1376 lpfc_printf_log(phba, 1377 KERN_WARNING, 1378 LOG_INIT, 1379 "%d:0460 Bring Adapter offline\n", 1380 phba->brd_no); 1381 1382 /* Bring down the SLI Layer and cleanup. The HBA is offline 1383 now. */ 1384 lpfc_sli_hba_down(phba); 1385 lpfc_cleanup(phba, 1); 1386 spin_lock_irqsave(phba->host->host_lock, iflag); 1387 phba->fc_flag |= FC_OFFLINE_MODE; 1388 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1389 return 0; 1390 } 1391 1392 /****************************************************************************** 1393 * Function name: lpfc_scsi_free 1394 * 1395 * Description: Called from lpfc_pci_remove_one free internal driver resources 1396 * 1397 ******************************************************************************/ 1398 static int 1399 lpfc_scsi_free(struct lpfc_hba * phba) 1400 { 1401 struct lpfc_scsi_buf *sb, *sb_next; 1402 struct lpfc_iocbq *io, *io_next; 1403 1404 spin_lock_irq(phba->host->host_lock); 1405 /* Release all the lpfc_scsi_bufs maintained by this host. */ 1406 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 1407 list_del(&sb->list); 1408 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 1409 sb->dma_handle); 1410 kfree(sb); 1411 phba->total_scsi_bufs--; 1412 } 1413 1414 /* Release all the lpfc_iocbq entries maintained by this host. 
*/ 1415 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 1416 list_del(&io->list); 1417 kfree(io); 1418 phba->total_iocbq_bufs--; 1419 } 1420 1421 spin_unlock_irq(phba->host->host_lock); 1422 1423 return 0; 1424 } 1425 1426 1427 static int __devinit 1428 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 1429 { 1430 struct Scsi_Host *host; 1431 struct lpfc_hba *phba; 1432 struct lpfc_sli *psli; 1433 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 1434 unsigned long bar0map_len, bar2map_len; 1435 int error = -ENODEV, retval; 1436 int i; 1437 uint16_t iotag; 1438 1439 if (pci_enable_device(pdev)) 1440 goto out; 1441 if (pci_request_regions(pdev, LPFC_DRIVER_NAME)) 1442 goto out_disable_device; 1443 1444 host = scsi_host_alloc(&lpfc_template, sizeof (struct lpfc_hba)); 1445 if (!host) 1446 goto out_release_regions; 1447 1448 phba = (struct lpfc_hba*)host->hostdata; 1449 memset(phba, 0, sizeof (struct lpfc_hba)); 1450 phba->host = host; 1451 1452 phba->fc_flag |= FC_LOADING; 1453 phba->pcidev = pdev; 1454 1455 /* Assign an unused board number */ 1456 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL)) 1457 goto out_put_host; 1458 1459 error = idr_get_new(&lpfc_hba_index, NULL, &phba->brd_no); 1460 if (error) 1461 goto out_put_host; 1462 1463 host->unique_id = phba->brd_no; 1464 INIT_LIST_HEAD(&phba->ctrspbuflist); 1465 INIT_LIST_HEAD(&phba->rnidrspbuflist); 1466 INIT_LIST_HEAD(&phba->freebufList); 1467 1468 /* Initialize timers used by driver */ 1469 init_timer(&phba->fc_estabtmo); 1470 phba->fc_estabtmo.function = lpfc_establish_link_tmo; 1471 phba->fc_estabtmo.data = (unsigned long)phba; 1472 init_timer(&phba->fc_disctmo); 1473 phba->fc_disctmo.function = lpfc_disc_timeout; 1474 phba->fc_disctmo.data = (unsigned long)phba; 1475 1476 init_timer(&phba->fc_fdmitmo); 1477 phba->fc_fdmitmo.function = lpfc_fdmi_tmo; 1478 phba->fc_fdmitmo.data = (unsigned long)phba; 1479 init_timer(&phba->els_tmofunc); 1480 phba->els_tmofunc.function = lpfc_els_timeout; 1481 phba->els_tmofunc.data = (unsigned long)phba; 1482 psli = &phba->sli; 1483 init_timer(&psli->mbox_tmo); 1484 psli->mbox_tmo.function = lpfc_mbox_timeout; 1485 psli->mbox_tmo.data = (unsigned long)phba; 1486 1487 init_timer(&phba->fcp_poll_timer); 1488 phba->fcp_poll_timer.function = lpfc_poll_timeout; 1489 phba->fcp_poll_timer.data = (unsigned long)phba; 1490 1491 /* 1492 * Get all the module params for configuring this host and then 1493 * establish the host parameters. 1494 */ 1495 lpfc_get_cfgparam(phba); 1496 1497 host->max_id = LPFC_MAX_TARGET; 1498 host->max_lun = phba->cfg_max_luns; 1499 host->this_id = -1; 1500 1501 /* Initialize all internally managed lists. */ 1502 INIT_LIST_HEAD(&phba->fc_nlpmap_list); 1503 INIT_LIST_HEAD(&phba->fc_nlpunmap_list); 1504 INIT_LIST_HEAD(&phba->fc_unused_list); 1505 INIT_LIST_HEAD(&phba->fc_plogi_list); 1506 INIT_LIST_HEAD(&phba->fc_adisc_list); 1507 INIT_LIST_HEAD(&phba->fc_reglogin_list); 1508 INIT_LIST_HEAD(&phba->fc_prli_list); 1509 INIT_LIST_HEAD(&phba->fc_npr_list); 1510 1511 1512 pci_set_master(pdev); 1513 retval = pci_set_mwi(pdev); 1514 if (retval) 1515 dev_printk(KERN_WARNING, &pdev->dev, 1516 "Warning: pci_set_mwi returned %d\n", retval); 1517 1518 if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0) 1519 if (pci_set_dma_mask(phba->pcidev, DMA_32BIT_MASK) != 0) 1520 goto out_idr_remove; 1521 1522 /* 1523 * Get the bus address of Bar0 and Bar2 and the number of bytes 1524 * required by each mapping. 
1525 */ 1526 phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0); 1527 bar0map_len = pci_resource_len(phba->pcidev, 0); 1528 1529 phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2); 1530 bar2map_len = pci_resource_len(phba->pcidev, 2); 1531 1532 /* Map HBA SLIM to a kernel virtual address. */ 1533 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 1534 if (!phba->slim_memmap_p) { 1535 error = -ENODEV; 1536 dev_printk(KERN_ERR, &pdev->dev, 1537 "ioremap failed for SLIM memory.\n"); 1538 goto out_idr_remove; 1539 } 1540 1541 /* Map HBA Control Registers to a kernel virtual address. */ 1542 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 1543 if (!phba->ctrl_regs_memmap_p) { 1544 error = -ENODEV; 1545 dev_printk(KERN_ERR, &pdev->dev, 1546 "ioremap failed for HBA control registers.\n"); 1547 goto out_iounmap_slim; 1548 } 1549 1550 /* Allocate memory for SLI-2 structures */ 1551 phba->slim2p = dma_alloc_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE, 1552 &phba->slim2p_mapping, GFP_KERNEL); 1553 if (!phba->slim2p) 1554 goto out_iounmap; 1555 1556 memset(phba->slim2p, 0, SLI2_SLIM_SIZE); 1557 1558 /* Initialize the SLI Layer to run with lpfc HBAs. */ 1559 lpfc_sli_setup(phba); 1560 lpfc_sli_queue_setup(phba); 1561 1562 error = lpfc_mem_alloc(phba); 1563 if (error) 1564 goto out_free_slim; 1565 1566 /* Initialize and populate the iocb list per host. */ 1567 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 1568 for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) { 1569 iocbq_entry = kmalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 1570 if (iocbq_entry == NULL) { 1571 printk(KERN_ERR "%s: only allocated %d iocbs of " 1572 "expected %d count. Unloading driver.\n", 1573 __FUNCTION__, i, LPFC_IOCB_LIST_CNT); 1574 error = -ENOMEM; 1575 goto out_free_iocbq; 1576 } 1577 1578 memset(iocbq_entry, 0, sizeof(struct lpfc_iocbq)); 1579 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 1580 if (iotag == 0) { 1581 kfree (iocbq_entry); 1582 printk(KERN_ERR "%s: failed to allocate IOTAG. " 1583 "Unloading driver.\n", 1584 __FUNCTION__); 1585 error = -ENOMEM; 1586 goto out_free_iocbq; 1587 } 1588 spin_lock_irq(phba->host->host_lock); 1589 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 1590 phba->total_iocbq_bufs++; 1591 spin_unlock_irq(phba->host->host_lock); 1592 } 1593 1594 /* Initialize HBA structure */ 1595 phba->fc_edtov = FF_DEF_EDTOV; 1596 phba->fc_ratov = FF_DEF_RATOV; 1597 phba->fc_altov = FF_DEF_ALTOV; 1598 phba->fc_arbtov = FF_DEF_ARBTOV; 1599 1600 INIT_LIST_HEAD(&phba->work_list); 1601 phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT); 1602 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 1603 1604 /* Startup the kernel thread for this host adapter. */ 1605 phba->worker_thread = kthread_run(lpfc_do_work, phba, 1606 "lpfc_worker_%d", phba->brd_no); 1607 if (IS_ERR(phba->worker_thread)) { 1608 error = PTR_ERR(phba->worker_thread); 1609 goto out_free_iocbq; 1610 } 1611 1612 /* We can rely on a queue depth attribute only after SLI HBA setup */ 1613 host->can_queue = phba->cfg_hba_queue_depth - 10; 1614 1615 /* Tell the midlayer we support 16 byte commands */ 1616 host->max_cmd_len = 16; 1617 1618 /* Initialize the list of scsi buffers used by driver for scsi IO. 
*/ 1619 spin_lock_init(&phba->scsi_buf_list_lock); 1620 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); 1621 1622 host->transportt = lpfc_transport_template; 1623 pci_set_drvdata(pdev, host); 1624 error = scsi_add_host(host, &pdev->dev); 1625 if (error) 1626 goto out_kthread_stop; 1627 1628 error = lpfc_alloc_sysfs_attr(phba); 1629 if (error) 1630 goto out_remove_host; 1631 1632 error = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED, 1633 LPFC_DRIVER_NAME, phba); 1634 if (error) { 1635 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1636 "%d:0451 Enable interrupt handler failed\n", 1637 phba->brd_no); 1638 goto out_free_sysfs_attr; 1639 } 1640 phba->MBslimaddr = phba->slim_memmap_p; 1641 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 1642 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 1643 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 1644 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 1645 1646 error = lpfc_sli_hba_setup(phba); 1647 if (error) { 1648 error = -ENODEV; 1649 goto out_free_irq; 1650 } 1651 1652 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 1653 spin_lock_irq(phba->host->host_lock); 1654 lpfc_poll_start_timer(phba); 1655 spin_unlock_irq(phba->host->host_lock); 1656 } 1657 1658 /* 1659 * set fixed host attributes 1660 * Must done after lpfc_sli_hba_setup() 1661 */ 1662 1663 fc_host_node_name(host) = wwn_to_u64(phba->fc_nodename.u.wwn); 1664 fc_host_port_name(host) = wwn_to_u64(phba->fc_portname.u.wwn); 1665 fc_host_supported_classes(host) = FC_COS_CLASS3; 1666 1667 memset(fc_host_supported_fc4s(host), 0, 1668 sizeof(fc_host_supported_fc4s(host))); 1669 fc_host_supported_fc4s(host)[2] = 1; 1670 fc_host_supported_fc4s(host)[7] = 1; 1671 1672 lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(host)); 1673 1674 fc_host_supported_speeds(host) = 0; 1675 if (phba->lmt & LMT_10Gb) 1676 fc_host_supported_speeds(host) |= FC_PORTSPEED_10GBIT; 1677 if (phba->lmt & LMT_4Gb) 1678 fc_host_supported_speeds(host) |= FC_PORTSPEED_4GBIT; 1679 if (phba->lmt & LMT_2Gb) 1680 fc_host_supported_speeds(host) |= FC_PORTSPEED_2GBIT; 1681 if (phba->lmt & LMT_1Gb) 1682 fc_host_supported_speeds(host) |= FC_PORTSPEED_1GBIT; 1683 1684 fc_host_maxframe_size(host) = 1685 ((((uint32_t) phba->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 1686 (uint32_t) phba->fc_sparam.cmn.bbRcvSizeLsb); 1687 1688 /* This value is also unchanging */ 1689 memset(fc_host_active_fc4s(host), 0, 1690 sizeof(fc_host_active_fc4s(host))); 1691 fc_host_active_fc4s(host)[2] = 1; 1692 fc_host_active_fc4s(host)[7] = 1; 1693 1694 spin_lock_irq(phba->host->host_lock); 1695 phba->fc_flag &= ~FC_LOADING; 1696 spin_unlock_irq(phba->host->host_lock); 1697 return 0; 1698 1699 out_free_irq: 1700 lpfc_stop_timer(phba); 1701 phba->work_hba_events = 0; 1702 free_irq(phba->pcidev->irq, phba); 1703 out_free_sysfs_attr: 1704 lpfc_free_sysfs_attr(phba); 1705 out_remove_host: 1706 fc_remove_host(phba->host); 1707 scsi_remove_host(phba->host); 1708 out_kthread_stop: 1709 kthread_stop(phba->worker_thread); 1710 out_free_iocbq: 1711 list_for_each_entry_safe(iocbq_entry, iocbq_next, 1712 &phba->lpfc_iocb_list, list) { 1713 spin_lock_irq(phba->host->host_lock); 1714 kfree(iocbq_entry); 1715 phba->total_iocbq_bufs--; 1716 spin_unlock_irq(phba->host->host_lock); 1717 } 1718 lpfc_mem_free(phba); 1719 out_free_slim: 1720 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p, 1721 phba->slim2p_mapping); 1722 out_iounmap: 1723 iounmap(phba->ctrl_regs_memmap_p); 1724 out_iounmap_slim: 1725 
iounmap(phba->slim_memmap_p); 1726 out_idr_remove: 1727 idr_remove(&lpfc_hba_index, phba->brd_no); 1728 out_put_host: 1729 phba->host = NULL; 1730 scsi_host_put(host); 1731 out_release_regions: 1732 pci_release_regions(pdev); 1733 out_disable_device: 1734 pci_disable_device(pdev); 1735 out: 1736 pci_set_drvdata(pdev, NULL); 1737 return error; 1738 } 1739 1740 static void __devexit 1741 lpfc_pci_remove_one(struct pci_dev *pdev) 1742 { 1743 struct Scsi_Host *host = pci_get_drvdata(pdev); 1744 struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata; 1745 unsigned long iflag; 1746 1747 lpfc_free_sysfs_attr(phba); 1748 1749 spin_lock_irqsave(phba->host->host_lock, iflag); 1750 phba->fc_flag |= FC_UNLOADING; 1751 1752 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1753 1754 fc_remove_host(phba->host); 1755 scsi_remove_host(phba->host); 1756 1757 kthread_stop(phba->worker_thread); 1758 1759 /* 1760 * Bring down the SLI Layer. This step disable all interrupts, 1761 * clears the rings, discards all mailbox commands, and resets 1762 * the HBA. 1763 */ 1764 lpfc_sli_hba_down(phba); 1765 lpfc_sli_brdrestart(phba); 1766 1767 /* Release the irq reservation */ 1768 free_irq(phba->pcidev->irq, phba); 1769 1770 lpfc_cleanup(phba, 0); 1771 lpfc_stop_timer(phba); 1772 phba->work_hba_events = 0; 1773 1774 /* 1775 * Call scsi_free before mem_free since scsi bufs are released to their 1776 * corresponding pools here. 1777 */ 1778 lpfc_scsi_free(phba); 1779 lpfc_mem_free(phba); 1780 1781 /* Free resources associated with SLI2 interface */ 1782 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 1783 phba->slim2p, phba->slim2p_mapping); 1784 1785 /* unmap adapter SLIM and Control Registers */ 1786 iounmap(phba->ctrl_regs_memmap_p); 1787 iounmap(phba->slim_memmap_p); 1788 1789 pci_release_regions(phba->pcidev); 1790 pci_disable_device(phba->pcidev); 1791 1792 idr_remove(&lpfc_hba_index, phba->brd_no); 1793 scsi_host_put(phba->host); 1794 1795 pci_set_drvdata(pdev, NULL); 1796 } 1797 1798 static struct pci_device_id lpfc_id_table[] = { 1799 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, 1800 PCI_ANY_ID, PCI_ANY_ID, }, 1801 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY, 1802 PCI_ANY_ID, PCI_ANY_ID, }, 1803 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR, 1804 PCI_ANY_ID, PCI_ANY_ID, }, 1805 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS, 1806 PCI_ANY_ID, PCI_ANY_ID, }, 1807 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR, 1808 PCI_ANY_ID, PCI_ANY_ID, }, 1809 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY, 1810 PCI_ANY_ID, PCI_ANY_ID, }, 1811 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY, 1812 PCI_ANY_ID, PCI_ANY_ID, }, 1813 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY, 1814 PCI_ANY_ID, PCI_ANY_ID, }, 1815 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY, 1816 PCI_ANY_ID, PCI_ANY_ID, }, 1817 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE, 1818 PCI_ANY_ID, PCI_ANY_ID, }, 1819 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP, 1820 PCI_ANY_ID, PCI_ANY_ID, }, 1821 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP, 1822 PCI_ANY_ID, PCI_ANY_ID, }, 1823 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS, 1824 PCI_ANY_ID, PCI_ANY_ID, }, 1825 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP, 1826 PCI_ANY_ID, PCI_ANY_ID, }, 1827 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP, 1828 PCI_ANY_ID, PCI_ANY_ID, }, 1829 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID, 1830 PCI_ANY_ID, PCI_ANY_ID, }, 1831 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB, 1832 PCI_ANY_ID, PCI_ANY_ID, }, 1833 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR, 1834 PCI_ANY_ID, PCI_ANY_ID, }, 1835 
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, lpfc_id_table);

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= __devexit_p(lpfc_pci_remove_one),
};

static int __init
lpfc_init(void)
{
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	lpfc_transport_template =
		fc_attach_transport(&lpfc_transport_functions);
	if (!lpfc_transport_template)
		return -ENOMEM;
	error = pci_register_driver(&lpfc_driver);
	if (error)
		fc_release_transport(lpfc_transport_template);

	return error;
}

static void __exit
lpfc_exit(void)
{
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);