/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for *
 * Fibre Channel Host Bus Adapters. *
 * Copyright (C) 2004-2005 Emulex. All rights reserved. *
 * EMULEX and SLI are trademarks of Emulex. *
 * www.emulex.com *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
 * *
 * This program is free software; you can redistribute it and/or *
 * modify it under the terms of version 2 of the GNU General *
 * Public License as published by the Free Software Foundation. *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for *
 * more details, a copy of which can be found in the file COPYING *
 * included with this package. *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_version.h"

static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

/************************************************************************/
/* */
/* lpfc_config_port_prep */
/* This routine will do LPFC initialization prior to the */
/* CONFIG_PORT mailbox command. This will be initialized */
/* as a SLI layer callback routine. */
/* This routine returns 0 on success or -ERESTART if it wants */
/* the SLI layer to reset the HBA and try again. Any */
/* other return value indicates an error.
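Roughly, the routine issues a short series of polled mailbox commands:
READ_NVPARM (only on "LC" HBAs, to write the GPL-use unlock string and
read the WWNN into phba->wwnn), READ_REV (firmware and BIU revision
data saved into phba->vpd), and a DUMP (lpfc_dump_mem) loop that pulls
the adapter VPD into a temporary buffer for lpfc_parse_vpd().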
*/ 61 /* */ 62 /************************************************************************/ 63 int 64 lpfc_config_port_prep(struct lpfc_hba * phba) 65 { 66 lpfc_vpd_t *vp = &phba->vpd; 67 int i = 0, rc; 68 LPFC_MBOXQ_t *pmb; 69 MAILBOX_t *mb; 70 char *lpfc_vpd_data = NULL; 71 uint16_t offset = 0; 72 static char licensed[56] = 73 "key unlock for use with gnu public licensed code only\0"; 74 75 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 76 if (!pmb) { 77 phba->hba_state = LPFC_HBA_ERROR; 78 return -ENOMEM; 79 } 80 81 mb = &pmb->mb; 82 phba->hba_state = LPFC_INIT_MBX_CMDS; 83 84 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 85 uint32_t *ptext = (uint32_t *) licensed; 86 87 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 88 *ptext = cpu_to_be32(*ptext); 89 90 lpfc_read_nv(phba, pmb); 91 memset((char*)mb->un.varRDnvp.rsvd3, 0, 92 sizeof (mb->un.varRDnvp.rsvd3)); 93 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, 94 sizeof (licensed)); 95 96 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 97 98 if (rc != MBX_SUCCESS) { 99 lpfc_printf_log(phba, 100 KERN_ERR, 101 LOG_MBOX, 102 "%d:0324 Config Port initialization " 103 "error, mbxCmd x%x READ_NVPARM, " 104 "mbxStatus x%x\n", 105 phba->brd_no, 106 mb->mbxCommand, mb->mbxStatus); 107 mempool_free(pmb, phba->mbox_mem_pool); 108 return -ERESTART; 109 } 110 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, 111 sizeof (mb->un.varRDnvp.nodename)); 112 } 113 114 /* Setup and issue mailbox READ REV command */ 115 lpfc_read_rev(phba, pmb); 116 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 117 if (rc != MBX_SUCCESS) { 118 lpfc_printf_log(phba, 119 KERN_ERR, 120 LOG_INIT, 121 "%d:0439 Adapter failed to init, mbxCmd x%x " 122 "READ_REV, mbxStatus x%x\n", 123 phba->brd_no, 124 mb->mbxCommand, mb->mbxStatus); 125 mempool_free( pmb, phba->mbox_mem_pool); 126 return -ERESTART; 127 } 128 129 /* The HBA's current state is provided by the ProgType and rr fields. 130 * Read and check the value of these fields before continuing to config 131 * this port. 
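 * If rr is clear or ProgType is not 2, the revision data cannot be
 * trusted: the code below leaves vp->rev.rBit at 0, logs message 0440
 * and returns -ERESTART so the SLI layer resets the HBA and retries.
 * Otherwise rBit is set and the SLI-1/SLI-2 firmware names and
 * revisions are copied into phba->vpd.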
132 */ 133 if (mb->un.varRdRev.rr == 0 || mb->un.varRdRev.un.b.ProgType != 2) { 134 /* Old firmware */ 135 vp->rev.rBit = 0; 136 lpfc_printf_log(phba, 137 KERN_ERR, 138 LOG_INIT, 139 "%d:0440 Adapter failed to init, mbxCmd x%x " 140 "READ_REV detected outdated firmware" 141 "Data: x%x\n", 142 phba->brd_no, 143 mb->mbxCommand, 0); 144 mempool_free(pmb, phba->mbox_mem_pool); 145 return -ERESTART; 146 } else { 147 vp->rev.rBit = 1; 148 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev; 149 memcpy(vp->rev.sli1FwName, 150 (char*)mb->un.varRdRev.sli1FwName, 16); 151 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev; 152 memcpy(vp->rev.sli2FwName, 153 (char *)mb->un.varRdRev.sli2FwName, 16); 154 } 155 156 /* Save information as VPD data */ 157 vp->rev.biuRev = mb->un.varRdRev.biuRev; 158 vp->rev.smRev = mb->un.varRdRev.smRev; 159 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev; 160 vp->rev.endecRev = mb->un.varRdRev.endecRev; 161 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh; 162 vp->rev.fcphLow = mb->un.varRdRev.fcphLow; 163 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh; 164 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow; 165 vp->rev.postKernRev = mb->un.varRdRev.postKernRev; 166 vp->rev.opFwRev = mb->un.varRdRev.opFwRev; 167 168 if (lpfc_is_LC_HBA(phba->pcidev->device)) 169 memcpy(phba->RandomData, (char *)&mb->un.varWords[24], 170 sizeof (phba->RandomData)); 171 172 /* Get the default values for Model Name and Description */ 173 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 174 175 /* Get adapter VPD information */ 176 pmb->context2 = kmalloc(DMP_RSP_SIZE, GFP_KERNEL); 177 if (!pmb->context2) 178 goto out_free_mbox; 179 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL); 180 if (!lpfc_vpd_data) 181 goto out_free_context2; 182 183 do { 184 lpfc_dump_mem(phba, pmb, offset); 185 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 186 187 if (rc != MBX_SUCCESS) { 188 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 189 "%d:0441 VPD not present on adapter, " 190 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n", 191 phba->brd_no, 192 mb->mbxCommand, mb->mbxStatus); 193 kfree(lpfc_vpd_data); 194 lpfc_vpd_data = NULL; 195 break; 196 } 197 198 lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset, 199 mb->un.varDmp.word_cnt); 200 offset += mb->un.varDmp.word_cnt; 201 } while (mb->un.varDmp.word_cnt); 202 lpfc_parse_vpd(phba, lpfc_vpd_data); 203 204 kfree(lpfc_vpd_data); 205 out_free_context2: 206 kfree(pmb->context2); 207 out_free_mbox: 208 mempool_free(pmb, phba->mbox_mem_pool); 209 return 0; 210 } 211 212 /************************************************************************/ 213 /* */ 214 /* lpfc_config_port_post */ 215 /* This routine will do LPFC initialization after the */ 216 /* CONFIG_PORT mailbox command. This will be initialized */ 217 /* as a SLI layer callback routine. */ 218 /* This routine returns 0 on success. Any other return value */ 219 /* indicates an error. 
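The routine is a chain of polled mailbox commands, each following
roughly the same pattern (a condensed sketch of the steps below, not
extra logic):

    lpfc_read_config(phba, pmb);
    if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
            phba->hba_state = LPFC_HBA_ERROR;
            mempool_free(pmb, phba->mbox_mem_pool);
            return -EIO;
    }

The sequence is CONFIG_LINK, READ_SPARM (service parameters copied to
phba->fc_sparam, WWNN/WWPN derived from them), a SET_SLIM write that
enables delayed ABTS for ELS timeouts, READ_CONFIG (caps
cfg_hba_queue_depth at max_xri + 1 and sanity-checks cfg_link_speed),
and finally INIT_LINK issued MBX_NOWAIT with lpfc_sli_def_mbox_cmpl as
its completion; host interrupts are enabled just before INIT_LINK and
the tail of the routine polls for up to 30 seconds while discovery
settles.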
*/ 220 /* */ 221 /************************************************************************/ 222 int 223 lpfc_config_port_post(struct lpfc_hba * phba) 224 { 225 LPFC_MBOXQ_t *pmb; 226 MAILBOX_t *mb; 227 struct lpfc_dmabuf *mp; 228 struct lpfc_sli *psli = &phba->sli; 229 uint32_t status, timeout; 230 int i, j, rc; 231 232 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 233 if (!pmb) { 234 phba->hba_state = LPFC_HBA_ERROR; 235 return -ENOMEM; 236 } 237 mb = &pmb->mb; 238 239 lpfc_config_link(phba, pmb); 240 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 241 if (rc != MBX_SUCCESS) { 242 lpfc_printf_log(phba, 243 KERN_ERR, 244 LOG_INIT, 245 "%d:0447 Adapter failed init, mbxCmd x%x " 246 "CONFIG_LINK mbxStatus x%x\n", 247 phba->brd_no, 248 mb->mbxCommand, mb->mbxStatus); 249 phba->hba_state = LPFC_HBA_ERROR; 250 mempool_free( pmb, phba->mbox_mem_pool); 251 return -EIO; 252 } 253 254 /* Get login parameters for NID. */ 255 lpfc_read_sparam(phba, pmb); 256 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 257 lpfc_printf_log(phba, 258 KERN_ERR, 259 LOG_INIT, 260 "%d:0448 Adapter failed init, mbxCmd x%x " 261 "READ_SPARM mbxStatus x%x\n", 262 phba->brd_no, 263 mb->mbxCommand, mb->mbxStatus); 264 phba->hba_state = LPFC_HBA_ERROR; 265 mp = (struct lpfc_dmabuf *) pmb->context1; 266 mempool_free( pmb, phba->mbox_mem_pool); 267 lpfc_mbuf_free(phba, mp->virt, mp->phys); 268 kfree(mp); 269 return -EIO; 270 } 271 272 mp = (struct lpfc_dmabuf *) pmb->context1; 273 274 memcpy(&phba->fc_sparam, mp->virt, sizeof (struct serv_parm)); 275 lpfc_mbuf_free(phba, mp->virt, mp->phys); 276 kfree(mp); 277 pmb->context1 = NULL; 278 279 memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName, 280 sizeof (struct lpfc_name)); 281 memcpy(&phba->fc_portname, &phba->fc_sparam.portName, 282 sizeof (struct lpfc_name)); 283 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 284 /* This should be consolidated into parse_vpd ? 
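Until that happens, the fallback below expands the six IEEE bytes of
the WWNN into a 12-character lower-case hex string: for each nibble j
the stored character is (j <= 9) ? '0' + j : 'a' + (j - 10), which is
exactly what the 0x30 / 0x61 arithmetic computes.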
- mr */ 285 if (phba->SerialNumber[0] == 0) { 286 uint8_t *outptr; 287 288 outptr = (uint8_t *) & phba->fc_nodename.IEEE[0]; 289 for (i = 0; i < 12; i++) { 290 status = *outptr++; 291 j = ((status & 0xf0) >> 4); 292 if (j <= 9) 293 phba->SerialNumber[i] = 294 (char)((uint8_t) 0x30 + (uint8_t) j); 295 else 296 phba->SerialNumber[i] = 297 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 298 i++; 299 j = (status & 0xf); 300 if (j <= 9) 301 phba->SerialNumber[i] = 302 (char)((uint8_t) 0x30 + (uint8_t) j); 303 else 304 phba->SerialNumber[i] = 305 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 306 } 307 } 308 309 /* This should turn on DELAYED ABTS for ELS timeouts */ 310 lpfc_set_slim(phba, pmb, 0x052198, 0x1); 311 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 312 phba->hba_state = LPFC_HBA_ERROR; 313 mempool_free( pmb, phba->mbox_mem_pool); 314 return -EIO; 315 } 316 317 318 lpfc_read_config(phba, pmb); 319 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 320 lpfc_printf_log(phba, 321 KERN_ERR, 322 LOG_INIT, 323 "%d:0453 Adapter failed to init, mbxCmd x%x " 324 "READ_CONFIG, mbxStatus x%x\n", 325 phba->brd_no, 326 mb->mbxCommand, mb->mbxStatus); 327 phba->hba_state = LPFC_HBA_ERROR; 328 mempool_free( pmb, phba->mbox_mem_pool); 329 return -EIO; 330 } 331 332 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 333 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1)) 334 phba->cfg_hba_queue_depth = 335 mb->un.varRdConfig.max_xri + 1; 336 337 phba->lmt = mb->un.varRdConfig.lmt; 338 /* HBA is not 4GB capable, or HBA is not 2GB capable, 339 don't let link speed ask for it */ 340 if ((((phba->lmt & LMT_4250_10bit) != LMT_4250_10bit) && 341 (phba->cfg_link_speed > LINK_SPEED_2G)) || 342 (((phba->lmt & LMT_2125_10bit) != LMT_2125_10bit) && 343 (phba->cfg_link_speed > LINK_SPEED_1G))) { 344 /* Reset link speed to auto. 
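The test above fires when cfg_link_speed requests more than the
link-mode type word (lmt) from READ_CONFIG advertises: above 2G
without LMT_4250_10bit, or above 1G without LMT_2125_10bit; the
typical offender is a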
1G/2GB HBA cfg'd for 4G */ 345 lpfc_printf_log(phba, 346 KERN_WARNING, 347 LOG_LINK_EVENT, 348 "%d:1302 Invalid speed for this board: " 349 "Reset link speed to auto: x%x\n", 350 phba->brd_no, 351 phba->cfg_link_speed); 352 phba->cfg_link_speed = LINK_SPEED_AUTO; 353 } 354 355 phba->hba_state = LPFC_LINK_DOWN; 356 357 /* Only process IOCBs on ring 0 till hba_state is READY */ 358 if (psli->ring[psli->ip_ring].cmdringaddr) 359 psli->ring[psli->ip_ring].flag |= LPFC_STOP_IOCB_EVENT; 360 if (psli->ring[psli->fcp_ring].cmdringaddr) 361 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT; 362 if (psli->ring[psli->next_ring].cmdringaddr) 363 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT; 364 365 /* Post receive buffers for desired rings */ 366 lpfc_post_rcv_buf(phba); 367 368 /* Enable appropriate host interrupts */ 369 spin_lock_irq(phba->host->host_lock); 370 status = readl(phba->HCregaddr); 371 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 372 if (psli->num_rings > 0) 373 status |= HC_R0INT_ENA; 374 if (psli->num_rings > 1) 375 status |= HC_R1INT_ENA; 376 if (psli->num_rings > 2) 377 status |= HC_R2INT_ENA; 378 if (psli->num_rings > 3) 379 status |= HC_R3INT_ENA; 380 381 writel(status, phba->HCregaddr); 382 readl(phba->HCregaddr); /* flush */ 383 spin_unlock_irq(phba->host->host_lock); 384 385 /* 386 * Setup the ring 0 (els) timeout handler 387 */ 388 timeout = phba->fc_ratov << 1; 389 phba->els_tmofunc.expires = jiffies + HZ * timeout; 390 add_timer(&phba->els_tmofunc); 391 392 lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed); 393 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 394 if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) != MBX_SUCCESS) { 395 lpfc_printf_log(phba, 396 KERN_ERR, 397 LOG_INIT, 398 "%d:0454 Adapter failed to init, mbxCmd x%x " 399 "INIT_LINK, mbxStatus x%x\n", 400 phba->brd_no, 401 mb->mbxCommand, mb->mbxStatus); 402 403 /* Clear all interrupt enable conditions */ 404 writel(0, phba->HCregaddr); 405 readl(phba->HCregaddr); /* flush */ 406 /* Clear all pending interrupts */ 407 writel(0xffffffff, phba->HAregaddr); 408 readl(phba->HAregaddr); /* flush */ 409 410 phba->hba_state = LPFC_HBA_ERROR; 411 mempool_free(pmb, phba->mbox_mem_pool); 412 return -EIO; 413 } 414 /* MBOX buffer will be freed in mbox compl */ 415 416 i = 0; 417 while ((phba->hba_state != LPFC_HBA_READY) || 418 (phba->num_disc_nodes) || (phba->fc_prli_sent) || 419 ((phba->fc_map_cnt == 0) && (i<2)) || 420 (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) { 421 /* Check every second for 30 retries. */ 422 i++; 423 if (i > 30) { 424 break; 425 } 426 if ((i >= 15) && (phba->hba_state <= LPFC_LINK_DOWN)) { 427 /* The link is down. Set linkdown timeout */ 428 break; 429 } 430 431 /* Delay for 1 second to give discovery time to complete. */ 432 msleep(1000); 433 434 } 435 436 /* Since num_disc_nodes keys off of PLOGI, delay a bit to let 437 * any potential PRLIs to flush thru the SLI sub-system. 438 */ 439 msleep(50); 440 441 return (0); 442 } 443 444 /************************************************************************/ 445 /* */ 446 /* lpfc_hba_down_prep */ 447 /* This routine will do LPFC uninitialization before the */ 448 /* HBA is reset when bringing down the SLI Layer. This will be */ 449 /* initialized as a SLI layer callback routine. */ 450 /* This routine returns 0 on success. Any other return value */ 451 /* indicates an error. 
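Concretely, host interrupts are shut off by writing 0 to the HC
register (with a readback to flush the write), and any pending RSCN
state, outstanding ELS commands and the discovery node lists are
flushed so nothing still references the hardware when the SLI layer
resets it.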
*/ 452 /* */ 453 /************************************************************************/ 454 int 455 lpfc_hba_down_prep(struct lpfc_hba * phba) 456 { 457 /* Disable interrupts */ 458 writel(0, phba->HCregaddr); 459 readl(phba->HCregaddr); /* flush */ 460 461 /* Cleanup potential discovery resources */ 462 lpfc_els_flush_rscn(phba); 463 lpfc_els_flush_cmd(phba); 464 lpfc_disc_flush_list(phba); 465 466 return (0); 467 } 468 469 /************************************************************************/ 470 /* */ 471 /* lpfc_handle_eratt */ 472 /* This routine will handle processing a Host Attention */ 473 /* Error Status event. This will be initialized */ 474 /* as a SLI layer callback routine. */ 475 /* */ 476 /************************************************************************/ 477 void 478 lpfc_handle_eratt(struct lpfc_hba * phba) 479 { 480 struct lpfc_sli *psli = &phba->sli; 481 struct lpfc_sli_ring *pring; 482 483 /* 484 * If a reset is sent to the HBA restore PCI configuration registers. 485 */ 486 if ( phba->hba_state == LPFC_INIT_START ) { 487 mdelay(1); 488 readl(phba->HCregaddr); /* flush */ 489 writel(0, phba->HCregaddr); 490 readl(phba->HCregaddr); /* flush */ 491 492 /* Restore PCI cmd register */ 493 pci_write_config_word(phba->pcidev, 494 PCI_COMMAND, phba->pci_cfg_value); 495 } 496 497 if (phba->work_hs & HS_FFER6) { 498 /* Re-establishing Link */ 499 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 500 "%d:1301 Re-establishing Link " 501 "Data: x%x x%x x%x\n", 502 phba->brd_no, phba->work_hs, 503 phba->work_status[0], phba->work_status[1]); 504 spin_lock_irq(phba->host->host_lock); 505 phba->fc_flag |= FC_ESTABLISH_LINK; 506 spin_unlock_irq(phba->host->host_lock); 507 508 /* 509 * Firmware stops when it triggled erratt with HS_FFER6. 510 * That could cause the I/Os dropped by the firmware. 511 * Error iocb (I/O) on txcmplq and let the SCSI layer 512 * retry it after re-establishing link. 513 */ 514 pring = &psli->ring[psli->fcp_ring]; 515 lpfc_sli_abort_iocb_ring(phba, pring); 516 517 518 /* 519 * There was a firmware error. Take the hba offline and then 520 * attempt to restart it. 521 */ 522 lpfc_offline(phba); 523 if (lpfc_online(phba) == 0) { /* Initialize the HBA */ 524 mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60); 525 return; 526 } 527 } else { 528 /* The if clause above forces this code path when the status 529 * failure is a value other than FFER6. Do not call the offline 530 * twice. This is the adapter hardware error path. 531 */ 532 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 533 "%d:0457 Adapter Hardware Error " 534 "Data: x%x x%x x%x\n", 535 phba->brd_no, phba->work_hs, 536 phba->work_status[0], phba->work_status[1]); 537 538 lpfc_offline(phba); 539 540 /* 541 * Restart all traffic to this host. Since the fc_transport 542 * block functions (future) were not called in lpfc_offline, 543 * don't call them here. 544 */ 545 scsi_unblock_requests(phba->host); 546 } 547 } 548 549 /************************************************************************/ 550 /* */ 551 /* lpfc_handle_latt */ 552 /* This routine will handle processing a Host Attention */ 553 /* Link Status event. This will be initialized */ 554 /* as a SLI layer callback routine. 
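The handler allocates a mailbox and a DMA buffer, issues READ_LA with
lpfc_mbx_cmpl_read_la as the completion routine, and clears HA_LATT.
If any step fails it unwinds the allocations, re-enables the link
attention interrupt (LPFC_PROCESS_LA plus HC_LAINT_ENA), still clears
HA_LATT, calls lpfc_linkdown() and marks the HBA LPFC_HBA_ERROR.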
*/ 555 /* */ 556 /************************************************************************/ 557 void 558 lpfc_handle_latt(struct lpfc_hba * phba) 559 { 560 struct lpfc_sli *psli = &phba->sli; 561 LPFC_MBOXQ_t *pmb; 562 volatile uint32_t control; 563 struct lpfc_dmabuf *mp; 564 int rc = -ENOMEM; 565 566 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 567 if (!pmb) 568 goto lpfc_handle_latt_err_exit; 569 570 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 571 if (!mp) 572 goto lpfc_handle_latt_free_pmb; 573 574 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 575 if (!mp->virt) 576 goto lpfc_handle_latt_free_mp; 577 578 rc = -EIO; 579 580 581 psli->slistat.link_event++; 582 lpfc_read_la(phba, pmb, mp); 583 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la; 584 rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB)); 585 if (rc == MBX_NOT_FINISHED) 586 goto lpfc_handle_latt_free_mp; 587 588 /* Clear Link Attention in HA REG */ 589 spin_lock_irq(phba->host->host_lock); 590 writel(HA_LATT, phba->HAregaddr); 591 readl(phba->HAregaddr); /* flush */ 592 spin_unlock_irq(phba->host->host_lock); 593 594 return; 595 596 lpfc_handle_latt_free_mp: 597 kfree(mp); 598 lpfc_handle_latt_free_pmb: 599 kfree(pmb); 600 lpfc_handle_latt_err_exit: 601 /* Enable Link attention interrupts */ 602 spin_lock_irq(phba->host->host_lock); 603 psli->sli_flag |= LPFC_PROCESS_LA; 604 control = readl(phba->HCregaddr); 605 control |= HC_LAINT_ENA; 606 writel(control, phba->HCregaddr); 607 readl(phba->HCregaddr); /* flush */ 608 609 /* Clear Link Attention in HA REG */ 610 writel(HA_LATT, phba->HAregaddr); 611 readl(phba->HAregaddr); /* flush */ 612 spin_unlock_irq(phba->host->host_lock); 613 lpfc_linkdown(phba); 614 phba->hba_state = LPFC_HBA_ERROR; 615 616 /* The other case is an error from issue_mbox */ 617 if (rc == -ENOMEM) 618 lpfc_printf_log(phba, 619 KERN_WARNING, 620 LOG_MBOX, 621 "%d:0300 READ_LA: no buffers\n", 622 phba->brd_no); 623 624 return; 625 } 626 627 /************************************************************************/ 628 /* */ 629 /* lpfc_parse_vpd */ 630 /* This routine will parse the VPD data */ 631 /* */ 632 /************************************************************************/ 633 static int 634 lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd) 635 { 636 uint8_t lenlo, lenhi; 637 uint32_t Length; 638 int i, j; 639 int finished = 0; 640 int index = 0; 641 642 if (!vpd) 643 return 0; 644 645 /* Vital Product */ 646 lpfc_printf_log(phba, 647 KERN_INFO, 648 LOG_INIT, 649 "%d:0455 Vital Product Data: x%x x%x x%x x%x\n", 650 phba->brd_no, 651 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 652 (uint32_t) vpd[3]); 653 do { 654 switch (vpd[index]) { 655 case 0x82: 656 index += 1; 657 lenlo = vpd[index]; 658 index += 1; 659 lenhi = vpd[index]; 660 index += 1; 661 i = ((((unsigned short)lenhi) << 8) + lenlo); 662 index += i; 663 break; 664 case 0x90: 665 index += 1; 666 lenlo = vpd[index]; 667 index += 1; 668 lenhi = vpd[index]; 669 index += 1; 670 Length = ((((unsigned short)lenhi) << 8) + lenlo); 671 672 while (Length > 0) { 673 /* Look for Serial Number */ 674 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 675 index += 2; 676 i = vpd[index]; 677 index += 1; 678 j = 0; 679 Length -= (3+i); 680 while(i--) { 681 phba->SerialNumber[j++] = vpd[index++]; 682 if (j == 31) 683 break; 684 } 685 phba->SerialNumber[j] = 0; 686 continue; 687 } 688 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 689 phba->vpd_flag |= VPD_MODEL_DESC; 690 index += 2; 691 i = 
vpd[index]; 692 index += 1; 693 j = 0; 694 Length -= (3+i); 695 while(i--) { 696 phba->ModelDesc[j++] = vpd[index++]; 697 if (j == 255) 698 break; 699 } 700 phba->ModelDesc[j] = 0; 701 continue; 702 } 703 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 704 phba->vpd_flag |= VPD_MODEL_NAME; 705 index += 2; 706 i = vpd[index]; 707 index += 1; 708 j = 0; 709 Length -= (3+i); 710 while(i--) { 711 phba->ModelName[j++] = vpd[index++]; 712 if (j == 79) 713 break; 714 } 715 phba->ModelName[j] = 0; 716 continue; 717 } 718 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 719 phba->vpd_flag |= VPD_PROGRAM_TYPE; 720 index += 2; 721 i = vpd[index]; 722 index += 1; 723 j = 0; 724 Length -= (3+i); 725 while(i--) { 726 phba->ProgramType[j++] = vpd[index++]; 727 if (j == 255) 728 break; 729 } 730 phba->ProgramType[j] = 0; 731 continue; 732 } 733 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 734 phba->vpd_flag |= VPD_PORT; 735 index += 2; 736 i = vpd[index]; 737 index += 1; 738 j = 0; 739 Length -= (3+i); 740 while(i--) { 741 phba->Port[j++] = vpd[index++]; 742 if (j == 19) 743 break; 744 } 745 phba->Port[j] = 0; 746 continue; 747 } 748 else { 749 index += 2; 750 i = vpd[index]; 751 index += 1; 752 index += i; 753 Length -= (3 + i); 754 } 755 } 756 finished = 0; 757 break; 758 case 0x78: 759 finished = 1; 760 break; 761 default: 762 index ++; 763 break; 764 } 765 } while (!finished && (index < 108)); 766 767 return(1); 768 } 769 770 static void 771 lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp) 772 { 773 lpfc_vpd_t *vp; 774 uint32_t id; 775 char str[16]; 776 777 vp = &phba->vpd; 778 pci_read_config_dword(phba->pcidev, PCI_VENDOR_ID, &id); 779 780 switch ((id >> 16) & 0xffff) { 781 case PCI_DEVICE_ID_FIREFLY: 782 strcpy(str, "LP6000 1"); 783 break; 784 case PCI_DEVICE_ID_SUPERFLY: 785 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 786 strcpy(str, "LP7000 1"); 787 else 788 strcpy(str, "LP7000E 1"); 789 break; 790 case PCI_DEVICE_ID_DRAGONFLY: 791 strcpy(str, "LP8000 1"); 792 break; 793 case PCI_DEVICE_ID_CENTAUR: 794 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 795 strcpy(str, "LP9002 2"); 796 else 797 strcpy(str, "LP9000 1"); 798 break; 799 case PCI_DEVICE_ID_RFLY: 800 strcpy(str, "LP952 2"); 801 break; 802 case PCI_DEVICE_ID_PEGASUS: 803 strcpy(str, "LP9802 2"); 804 break; 805 case PCI_DEVICE_ID_THOR: 806 strcpy(str, "LP10000 2"); 807 break; 808 case PCI_DEVICE_ID_VIPER: 809 strcpy(str, "LPX1000 10"); 810 break; 811 case PCI_DEVICE_ID_PFLY: 812 strcpy(str, "LP982 2"); 813 break; 814 case PCI_DEVICE_ID_TFLY: 815 strcpy(str, "LP1050 2"); 816 break; 817 case PCI_DEVICE_ID_HELIOS: 818 strcpy(str, "LP11000 4"); 819 break; 820 case PCI_DEVICE_ID_BMID: 821 strcpy(str, "LP1150 4"); 822 break; 823 case PCI_DEVICE_ID_BSMB: 824 strcpy(str, "LP111 4"); 825 break; 826 case PCI_DEVICE_ID_ZEPHYR: 827 strcpy(str, "LP11000e 4"); 828 break; 829 case PCI_DEVICE_ID_ZMID: 830 strcpy(str, "LP1150e 4"); 831 break; 832 case PCI_DEVICE_ID_ZSMB: 833 strcpy(str, "LP111e 4"); 834 break; 835 case PCI_DEVICE_ID_LP101: 836 strcpy(str, "LP101 2"); 837 break; 838 case PCI_DEVICE_ID_LP10000S: 839 strcpy(str, "LP10000-S 2"); 840 break; 841 default: 842 memset(str, 0, 16); 843 break; 844 } 845 if (mdp) 846 sscanf(str, "%s", mdp); 847 if (descp) 848 sprintf(descp, "Emulex LightPulse %s Gigabit PCI Fibre " 849 "Channel Adapter", str); 850 } 851 852 /**************************************************/ 853 /* lpfc_post_buffer */ 854 /* */ 855 /* This routine will post count 
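(that is, cnt plus any buffers missed on earlier calls, carried in
pring->missbufcnt)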
buffers to the */ 856 /* ring with the QUE_RING_BUF_CN command. This */ 857 /* allows 3 buffers / command to be posted. */ 858 /* Returns the number of buffers NOT posted. */ 859 /**************************************************/ 860 int 861 lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt, 862 int type) 863 { 864 IOCB_t *icmd; 865 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list; 866 struct lpfc_iocbq *iocb = NULL; 867 struct lpfc_dmabuf *mp1, *mp2; 868 869 cnt += pring->missbufcnt; 870 871 /* While there are buffers to post */ 872 while (cnt > 0) { 873 /* Allocate buffer for command iocb */ 874 spin_lock_irq(phba->host->host_lock); 875 list_remove_head(lpfc_iocb_list, iocb, struct lpfc_iocbq, list); 876 spin_unlock_irq(phba->host->host_lock); 877 if (iocb == NULL) { 878 pring->missbufcnt = cnt; 879 return cnt; 880 } 881 memset(iocb, 0, sizeof (struct lpfc_iocbq)); 882 icmd = &iocb->iocb; 883 884 /* 2 buffers can be posted per command */ 885 /* Allocate buffer to post */ 886 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 887 if (mp1) 888 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 889 &mp1->phys); 890 if (mp1 == 0 || mp1->virt == 0) { 891 if (mp1) 892 kfree(mp1); 893 spin_lock_irq(phba->host->host_lock); 894 list_add_tail(&iocb->list, lpfc_iocb_list); 895 spin_unlock_irq(phba->host->host_lock); 896 pring->missbufcnt = cnt; 897 return cnt; 898 } 899 900 INIT_LIST_HEAD(&mp1->list); 901 /* Allocate buffer to post */ 902 if (cnt > 1) { 903 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 904 if (mp2) 905 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 906 &mp2->phys); 907 if (mp2 == 0 || mp2->virt == 0) { 908 if (mp2) 909 kfree(mp2); 910 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 911 kfree(mp1); 912 spin_lock_irq(phba->host->host_lock); 913 list_add_tail(&iocb->list, lpfc_iocb_list); 914 spin_unlock_irq(phba->host->host_lock); 915 pring->missbufcnt = cnt; 916 return cnt; 917 } 918 919 INIT_LIST_HEAD(&mp2->list); 920 } else { 921 mp2 = NULL; 922 } 923 924 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 925 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 926 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 927 icmd->ulpBdeCount = 1; 928 cnt--; 929 if (mp2) { 930 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 931 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 932 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 933 cnt--; 934 icmd->ulpBdeCount = 2; 935 } 936 937 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 938 icmd->ulpLe = 1; 939 940 spin_lock_irq(phba->host->host_lock); 941 if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) { 942 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 943 kfree(mp1); 944 cnt++; 945 if (mp2) { 946 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 947 kfree(mp2); 948 cnt++; 949 } 950 list_add_tail(&iocb->list, lpfc_iocb_list); 951 pring->missbufcnt = cnt; 952 spin_unlock_irq(phba->host->host_lock); 953 return cnt; 954 } 955 spin_unlock_irq(phba->host->host_lock); 956 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 957 if (mp2) { 958 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 959 } 960 } 961 pring->missbufcnt = 0; 962 return 0; 963 } 964 965 /************************************************************************/ 966 /* */ 967 /* lpfc_post_rcv_buf */ 968 /* This routine post initial rcv buffers to the configured rings */ 969 /* */ 970 /************************************************************************/ 971 static int 972 lpfc_post_rcv_buf(struct lpfc_hba * phba) 973 { 974 struct lpfc_sli *psli = 
&phba->sli; 975 976 /* Ring 0, ELS / CT buffers */ 977 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0, 1); 978 /* Ring 2 - FCP no buffers needed */ 979 980 return 0; 981 } 982 983 #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 984 985 /************************************************************************/ 986 /* */ 987 /* lpfc_sha_init */ 988 /* */ 989 /************************************************************************/ 990 static void 991 lpfc_sha_init(uint32_t * HashResultPointer) 992 { 993 HashResultPointer[0] = 0x67452301; 994 HashResultPointer[1] = 0xEFCDAB89; 995 HashResultPointer[2] = 0x98BADCFE; 996 HashResultPointer[3] = 0x10325476; 997 HashResultPointer[4] = 0xC3D2E1F0; 998 } 999 1000 /************************************************************************/ 1001 /* */ 1002 /* lpfc_sha_iterate */ 1003 /* */ 1004 /************************************************************************/ 1005 static void 1006 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer) 1007 { 1008 int t; 1009 uint32_t TEMP; 1010 uint32_t A, B, C, D, E; 1011 t = 16; 1012 do { 1013 HashWorkingPointer[t] = 1014 S(1, 1015 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 1016 8] ^ 1017 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]); 1018 } while (++t <= 79); 1019 t = 0; 1020 A = HashResultPointer[0]; 1021 B = HashResultPointer[1]; 1022 C = HashResultPointer[2]; 1023 D = HashResultPointer[3]; 1024 E = HashResultPointer[4]; 1025 1026 do { 1027 if (t < 20) { 1028 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999; 1029 } else if (t < 40) { 1030 TEMP = (B ^ C ^ D) + 0x6ED9EBA1; 1031 } else if (t < 60) { 1032 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC; 1033 } else { 1034 TEMP = (B ^ C ^ D) + 0xCA62C1D6; 1035 } 1036 TEMP += S(5, A) + E + HashWorkingPointer[t]; 1037 E = D; 1038 D = C; 1039 C = S(30, B); 1040 B = A; 1041 A = TEMP; 1042 } while (++t <= 79); 1043 1044 HashResultPointer[0] += A; 1045 HashResultPointer[1] += B; 1046 HashResultPointer[2] += C; 1047 HashResultPointer[3] += D; 1048 HashResultPointer[4] += E; 1049 1050 } 1051 1052 /************************************************************************/ 1053 /* */ 1054 /* lpfc_challenge_key */ 1055 /* */ 1056 /************************************************************************/ 1057 static void 1058 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking) 1059 { 1060 *HashWorking = (*RandomChallenge ^ *HashWorking); 1061 } 1062 1063 /************************************************************************/ 1064 /* */ 1065 /* lpfc_hba_init */ 1066 /* */ 1067 /************************************************************************/ 1068 void 1069 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 1070 { 1071 int t; 1072 uint32_t *HashWorking; 1073 uint32_t *pwwnn = phba->wwnn; 1074 1075 HashWorking = kmalloc(80 * sizeof(uint32_t), GFP_KERNEL); 1076 if (!HashWorking) 1077 return; 1078 1079 memset(HashWorking, 0, (80 * sizeof(uint32_t))); 1080 HashWorking[0] = HashWorking[78] = *pwwnn++; 1081 HashWorking[1] = HashWorking[79] = *pwwnn; 1082 1083 for (t = 0; t < 7; t++) 1084 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 1085 1086 lpfc_sha_init(hbainit); 1087 lpfc_sha_iterate(hbainit, HashWorking); 1088 kfree(HashWorking); 1089 } 1090 1091 static void 1092 lpfc_cleanup(struct lpfc_hba * phba, uint32_t save_bind) 1093 { 1094 struct lpfc_nodelist *ndlp, *next_ndlp; 1095 1096 /* clean up phba - lpfc specific */ 1097 lpfc_can_disctmo(phba); 1098 
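
	/*
	 * Every discovery node list is walked with the _safe iterator
	 * because lpfc_nlp_remove() can free or unlink the current ndlp;
	 * the cached next_ndlp keeps each walk valid.  Entries on the
	 * unused list are only moved to NLP_NO_LIST, all others are
	 * removed, and the list heads and counters are reset afterwards.
	 */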
list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpunmap_list, 1099 nlp_listp) { 1100 lpfc_nlp_remove(phba, ndlp); 1101 } 1102 1103 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list, 1104 nlp_listp) { 1105 lpfc_nlp_remove(phba, ndlp); 1106 } 1107 1108 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list, 1109 nlp_listp) { 1110 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 1111 } 1112 1113 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list, 1114 nlp_listp) { 1115 lpfc_nlp_remove(phba, ndlp); 1116 } 1117 1118 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list, 1119 nlp_listp) { 1120 lpfc_nlp_remove(phba, ndlp); 1121 } 1122 1123 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_reglogin_list, 1124 nlp_listp) { 1125 lpfc_nlp_remove(phba, ndlp); 1126 } 1127 1128 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list, 1129 nlp_listp) { 1130 lpfc_nlp_remove(phba, ndlp); 1131 } 1132 1133 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list, 1134 nlp_listp) { 1135 lpfc_nlp_remove(phba, ndlp); 1136 } 1137 1138 INIT_LIST_HEAD(&phba->fc_nlpmap_list); 1139 INIT_LIST_HEAD(&phba->fc_nlpunmap_list); 1140 INIT_LIST_HEAD(&phba->fc_unused_list); 1141 INIT_LIST_HEAD(&phba->fc_plogi_list); 1142 INIT_LIST_HEAD(&phba->fc_adisc_list); 1143 INIT_LIST_HEAD(&phba->fc_reglogin_list); 1144 INIT_LIST_HEAD(&phba->fc_prli_list); 1145 INIT_LIST_HEAD(&phba->fc_npr_list); 1146 1147 phba->fc_map_cnt = 0; 1148 phba->fc_unmap_cnt = 0; 1149 phba->fc_plogi_cnt = 0; 1150 phba->fc_adisc_cnt = 0; 1151 phba->fc_reglogin_cnt = 0; 1152 phba->fc_prli_cnt = 0; 1153 phba->fc_npr_cnt = 0; 1154 phba->fc_unused_cnt= 0; 1155 return; 1156 } 1157 1158 static void 1159 lpfc_establish_link_tmo(unsigned long ptr) 1160 { 1161 struct lpfc_hba *phba = (struct lpfc_hba *)ptr; 1162 unsigned long iflag; 1163 1164 1165 /* Re-establishing Link, timer expired */ 1166 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 1167 "%d:1300 Re-establishing Link, timer expired " 1168 "Data: x%x x%x\n", 1169 phba->brd_no, phba->fc_flag, phba->hba_state); 1170 spin_lock_irqsave(phba->host->host_lock, iflag); 1171 phba->fc_flag &= ~FC_ESTABLISH_LINK; 1172 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1173 } 1174 1175 static int 1176 lpfc_stop_timer(struct lpfc_hba * phba) 1177 { 1178 struct lpfc_sli *psli = &phba->sli; 1179 1180 /* Instead of a timer, this has been converted to a 1181 * deferred procedding list. 
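 * The list in question is phba->freebufList: the loop below drains it,
 * handing each lpfc_dmabuf back through lpfc_mbuf_free(), and only
 * then are the per-HBA timers (fc_estabtmo, fc_disctmo, fc_fdmitmo,
 * els_tmofunc and the SLI mbox_tmo) stopped with del_timer_sync().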
1182 */ 1183 while (!list_empty(&phba->freebufList)) { 1184 1185 struct lpfc_dmabuf *mp = NULL; 1186 1187 list_remove_head((&phba->freebufList), mp, 1188 struct lpfc_dmabuf, list); 1189 if (mp) { 1190 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1191 kfree(mp); 1192 } 1193 } 1194 1195 del_timer_sync(&phba->fc_estabtmo); 1196 del_timer_sync(&phba->fc_disctmo); 1197 del_timer_sync(&phba->fc_fdmitmo); 1198 del_timer_sync(&phba->els_tmofunc); 1199 psli = &phba->sli; 1200 del_timer_sync(&psli->mbox_tmo); 1201 return(1); 1202 } 1203 1204 int 1205 lpfc_online(struct lpfc_hba * phba) 1206 { 1207 if (!phba) 1208 return 0; 1209 1210 if (!(phba->fc_flag & FC_OFFLINE_MODE)) 1211 return 0; 1212 1213 lpfc_printf_log(phba, 1214 KERN_WARNING, 1215 LOG_INIT, 1216 "%d:0458 Bring Adapter online\n", 1217 phba->brd_no); 1218 1219 if (!lpfc_sli_queue_setup(phba)) 1220 return 1; 1221 1222 if (lpfc_sli_hba_setup(phba)) /* Initialize the HBA */ 1223 return 1; 1224 1225 spin_lock_irq(phba->host->host_lock); 1226 phba->fc_flag &= ~FC_OFFLINE_MODE; 1227 spin_unlock_irq(phba->host->host_lock); 1228 1229 /* 1230 * Restart all traffic to this host. Since the fc_transport block 1231 * functions (future) were not called in lpfc_offline, don't call them 1232 * here. 1233 */ 1234 scsi_unblock_requests(phba->host); 1235 return 0; 1236 } 1237 1238 int 1239 lpfc_offline(struct lpfc_hba * phba) 1240 { 1241 struct lpfc_sli_ring *pring; 1242 struct lpfc_sli *psli; 1243 unsigned long iflag; 1244 int i = 0; 1245 1246 if (!phba) 1247 return 0; 1248 1249 if (phba->fc_flag & FC_OFFLINE_MODE) 1250 return 0; 1251 1252 /* 1253 * Don't call the fc_transport block api (future). The device is 1254 * going offline and causing a timer to fire in the midlayer is 1255 * unproductive. Just block all new requests until the driver 1256 * comes back online. 1257 */ 1258 scsi_block_requests(phba->host); 1259 psli = &phba->sli; 1260 pring = &psli->ring[psli->fcp_ring]; 1261 1262 lpfc_linkdown(phba); 1263 1264 /* The linkdown event takes 30 seconds to timeout. */ 1265 while (pring->txcmplq_cnt) { 1266 mdelay(10); 1267 if (i++ > 3000) 1268 break; 1269 } 1270 1271 /* stop all timers associated with this hba */ 1272 lpfc_stop_timer(phba); 1273 phba->work_hba_events = 0; 1274 1275 lpfc_printf_log(phba, 1276 KERN_WARNING, 1277 LOG_INIT, 1278 "%d:0460 Bring Adapter offline\n", 1279 phba->brd_no); 1280 1281 /* Bring down the SLI Layer and cleanup. The HBA is offline 1282 now. */ 1283 lpfc_sli_hba_down(phba); 1284 lpfc_cleanup(phba, 1); 1285 spin_lock_irqsave(phba->host->host_lock, iflag); 1286 phba->fc_flag |= FC_OFFLINE_MODE; 1287 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1288 return 0; 1289 } 1290 1291 /****************************************************************************** 1292 * Function name: lpfc_scsi_free 1293 * 1294 * Description: Called from lpfc_pci_remove_one free internal driver resources 1295 * 1296 ******************************************************************************/ 1297 static int 1298 lpfc_scsi_free(struct lpfc_hba * phba) 1299 { 1300 struct lpfc_scsi_buf *sb, *sb_next; 1301 struct lpfc_iocbq *io, *io_next; 1302 1303 spin_lock_irq(phba->host->host_lock); 1304 /* Release all the lpfc_scsi_bufs maintained by this host. 
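Each buffer's DMA area is returned to lpfc_scsi_dma_buf_pool with
pci_pool_free() before the lpfc_scsi_buf itself is kfree()d; the
lpfc_iocbq entries further down are plain kmalloc allocations, so
kfree() alone releases them.  Both running totals are decremented as
the lists empty.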
*/ 1305 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 1306 list_del(&sb->list); 1307 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 1308 sb->dma_handle); 1309 kfree(sb); 1310 phba->total_scsi_bufs--; 1311 } 1312 1313 /* Release all the lpfc_iocbq entries maintained by this host. */ 1314 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 1315 list_del(&io->list); 1316 kfree(io); 1317 phba->total_iocbq_bufs--; 1318 } 1319 1320 spin_unlock_irq(phba->host->host_lock); 1321 1322 return 0; 1323 } 1324 1325 1326 static int __devinit 1327 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 1328 { 1329 struct Scsi_Host *host; 1330 struct lpfc_hba *phba; 1331 struct lpfc_sli *psli; 1332 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 1333 unsigned long bar0map_len, bar2map_len; 1334 int error = -ENODEV, retval; 1335 int i; 1336 u64 wwname; 1337 1338 if (pci_enable_device(pdev)) 1339 goto out; 1340 if (pci_request_regions(pdev, LPFC_DRIVER_NAME)) 1341 goto out_disable_device; 1342 1343 host = scsi_host_alloc(&lpfc_template, sizeof (struct lpfc_hba)); 1344 if (!host) 1345 goto out_release_regions; 1346 1347 phba = (struct lpfc_hba*)host->hostdata; 1348 memset(phba, 0, sizeof (struct lpfc_hba)); 1349 phba->host = host; 1350 1351 phba->fc_flag |= FC_LOADING; 1352 phba->pcidev = pdev; 1353 1354 /* Assign an unused board number */ 1355 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL)) 1356 goto out_put_host; 1357 1358 error = idr_get_new(&lpfc_hba_index, NULL, &phba->brd_no); 1359 if (error) 1360 goto out_put_host; 1361 1362 host->unique_id = phba->brd_no; 1363 1364 INIT_LIST_HEAD(&phba->ctrspbuflist); 1365 INIT_LIST_HEAD(&phba->rnidrspbuflist); 1366 INIT_LIST_HEAD(&phba->freebufList); 1367 1368 /* Initialize timers used by driver */ 1369 init_timer(&phba->fc_estabtmo); 1370 phba->fc_estabtmo.function = lpfc_establish_link_tmo; 1371 phba->fc_estabtmo.data = (unsigned long)phba; 1372 init_timer(&phba->fc_disctmo); 1373 phba->fc_disctmo.function = lpfc_disc_timeout; 1374 phba->fc_disctmo.data = (unsigned long)phba; 1375 1376 init_timer(&phba->fc_fdmitmo); 1377 phba->fc_fdmitmo.function = lpfc_fdmi_tmo; 1378 phba->fc_fdmitmo.data = (unsigned long)phba; 1379 init_timer(&phba->els_tmofunc); 1380 phba->els_tmofunc.function = lpfc_els_timeout; 1381 phba->els_tmofunc.data = (unsigned long)phba; 1382 psli = &phba->sli; 1383 init_timer(&psli->mbox_tmo); 1384 psli->mbox_tmo.function = lpfc_mbox_timeout; 1385 psli->mbox_tmo.data = (unsigned long)phba; 1386 1387 /* 1388 * Get all the module params for configuring this host and then 1389 * establish the host parameters. 1390 */ 1391 lpfc_get_cfgparam(phba); 1392 1393 host->max_id = LPFC_MAX_TARGET; 1394 host->max_lun = phba->cfg_max_luns; 1395 host->this_id = -1; 1396 1397 /* Initialize all internally managed lists. 
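These are the same eight discovery node lists that lpfc_cleanup()
drains again at teardown; each simply starts out empty here.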
*/ 1398 INIT_LIST_HEAD(&phba->fc_nlpmap_list); 1399 INIT_LIST_HEAD(&phba->fc_nlpunmap_list); 1400 INIT_LIST_HEAD(&phba->fc_unused_list); 1401 INIT_LIST_HEAD(&phba->fc_plogi_list); 1402 INIT_LIST_HEAD(&phba->fc_adisc_list); 1403 INIT_LIST_HEAD(&phba->fc_reglogin_list); 1404 INIT_LIST_HEAD(&phba->fc_prli_list); 1405 INIT_LIST_HEAD(&phba->fc_npr_list); 1406 1407 1408 pci_set_master(pdev); 1409 retval = pci_set_mwi(pdev); 1410 if (retval) 1411 dev_printk(KERN_WARNING, &pdev->dev, 1412 "Warning: pci_set_mwi returned %d\n", retval); 1413 1414 if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0) 1415 if (pci_set_dma_mask(phba->pcidev, DMA_32BIT_MASK) != 0) 1416 goto out_idr_remove; 1417 1418 /* 1419 * Get the bus address of Bar0 and Bar2 and the number of bytes 1420 * required by each mapping. 1421 */ 1422 phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0); 1423 bar0map_len = pci_resource_len(phba->pcidev, 0); 1424 1425 phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2); 1426 bar2map_len = pci_resource_len(phba->pcidev, 2); 1427 1428 /* Map HBA SLIM and Control Registers to a kernel virtual address. */ 1429 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 1430 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 1431 1432 /* Allocate memory for SLI-2 structures */ 1433 phba->slim2p = dma_alloc_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE, 1434 &phba->slim2p_mapping, GFP_KERNEL); 1435 if (!phba->slim2p) 1436 goto out_iounmap; 1437 1438 1439 /* Initialize the SLI Layer to run with lpfc HBAs. */ 1440 lpfc_sli_setup(phba); 1441 lpfc_sli_queue_setup(phba); 1442 1443 error = lpfc_mem_alloc(phba); 1444 if (error) 1445 goto out_free_slim; 1446 1447 /* Initialize and populate the iocb list per host. */ 1448 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 1449 for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) { 1450 iocbq_entry = kmalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 1451 if (iocbq_entry == NULL) { 1452 printk(KERN_ERR "%s: only allocated %d iocbs of " 1453 "expected %d count. Unloading driver.\n", 1454 __FUNCTION__, i, LPFC_IOCB_LIST_CNT); 1455 error = -ENOMEM; 1456 goto out_free_iocbq; 1457 } 1458 1459 memset(iocbq_entry, 0, sizeof(struct lpfc_iocbq)); 1460 spin_lock_irq(phba->host->host_lock); 1461 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 1462 phba->total_iocbq_bufs++; 1463 spin_unlock_irq(phba->host->host_lock); 1464 } 1465 1466 /* Initialize HBA structure */ 1467 phba->fc_edtov = FF_DEF_EDTOV; 1468 phba->fc_ratov = FF_DEF_RATOV; 1469 phba->fc_altov = FF_DEF_ALTOV; 1470 phba->fc_arbtov = FF_DEF_ARBTOV; 1471 1472 INIT_LIST_HEAD(&phba->work_list); 1473 phba->work_ha_mask = (HA_ERATT|HA_MBATT|HA_LATT); 1474 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 1475 1476 /* Startup the kernel thread for this host adapter. */ 1477 phba->worker_thread = kthread_run(lpfc_do_work, phba, 1478 "lpfc_worker_%d", phba->brd_no); 1479 if (IS_ERR(phba->worker_thread)) { 1480 error = PTR_ERR(phba->worker_thread); 1481 goto out_free_iocbq; 1482 } 1483 1484 /* We can rely on a queue depth attribute only after SLI HBA setup */ 1485 host->can_queue = phba->cfg_hba_queue_depth - 10; 1486 1487 /* Tell the midlayer we support 16 byte commands */ 1488 host->max_cmd_len = 16; 1489 1490 /* Initialize the list of scsi buffers used by driver for scsi IO. 
*/ 1491 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); 1492 1493 host->transportt = lpfc_transport_template; 1494 host->hostdata[0] = (unsigned long)phba; 1495 pci_set_drvdata(pdev, host); 1496 error = scsi_add_host(host, &pdev->dev); 1497 if (error) 1498 goto out_kthread_stop; 1499 1500 error = lpfc_alloc_sysfs_attr(phba); 1501 if (error) 1502 goto out_kthread_stop; 1503 1504 error = request_irq(phba->pcidev->irq, lpfc_intr_handler, SA_SHIRQ, 1505 LPFC_DRIVER_NAME, phba); 1506 if (error) { 1507 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1508 "%d:0451 Enable interrupt handler failed\n", 1509 phba->brd_no); 1510 goto out_free_sysfs_attr; 1511 } 1512 phba->MBslimaddr = phba->slim_memmap_p; 1513 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 1514 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 1515 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 1516 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 1517 1518 error = lpfc_sli_hba_setup(phba); 1519 if (error) 1520 goto out_free_irq; 1521 1522 /* 1523 * set fixed host attributes 1524 * Must done after lpfc_sli_hba_setup() 1525 */ 1526 1527 memcpy(&wwname, &phba->fc_nodename, sizeof(u64)); 1528 fc_host_node_name(host) = be64_to_cpu(wwname); 1529 memcpy(&wwname, &phba->fc_portname, sizeof(u64)); 1530 fc_host_port_name(host) = be64_to_cpu(wwname); 1531 fc_host_supported_classes(host) = FC_COS_CLASS3; 1532 1533 memset(fc_host_supported_fc4s(host), 0, 1534 sizeof(fc_host_supported_fc4s(host))); 1535 fc_host_supported_fc4s(host)[2] = 1; 1536 fc_host_supported_fc4s(host)[7] = 1; 1537 1538 lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(host)); 1539 1540 fc_host_supported_speeds(host) = 0; 1541 switch (FC_JEDEC_ID(phba->vpd.rev.biuRev)) { 1542 case VIPER_JEDEC_ID: 1543 fc_host_supported_speeds(host) |= FC_PORTSPEED_10GBIT; 1544 break; 1545 case HELIOS_JEDEC_ID: 1546 fc_host_supported_speeds(host) |= FC_PORTSPEED_4GBIT; 1547 /* Fall through */ 1548 case CENTAUR_2G_JEDEC_ID: 1549 case PEGASUS_JEDEC_ID: 1550 case THOR_JEDEC_ID: 1551 fc_host_supported_speeds(host) |= FC_PORTSPEED_2GBIT; 1552 /* Fall through */ 1553 default: 1554 fc_host_supported_speeds(host) = FC_PORTSPEED_1GBIT; 1555 } 1556 1557 fc_host_maxframe_size(host) = 1558 ((((uint32_t) phba->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 1559 (uint32_t) phba->fc_sparam.cmn.bbRcvSizeLsb); 1560 1561 /* This value is also unchanging */ 1562 memset(fc_host_active_fc4s(host), 0, 1563 sizeof(fc_host_active_fc4s(host))); 1564 fc_host_active_fc4s(host)[2] = 1; 1565 fc_host_active_fc4s(host)[7] = 1; 1566 1567 spin_lock_irq(phba->host->host_lock); 1568 phba->fc_flag &= ~FC_LOADING; 1569 spin_unlock_irq(phba->host->host_lock); 1570 return 0; 1571 1572 out_free_irq: 1573 lpfc_stop_timer(phba); 1574 phba->work_hba_events = 0; 1575 free_irq(phba->pcidev->irq, phba); 1576 out_free_sysfs_attr: 1577 lpfc_free_sysfs_attr(phba); 1578 out_kthread_stop: 1579 kthread_stop(phba->worker_thread); 1580 out_free_iocbq: 1581 list_for_each_entry_safe(iocbq_entry, iocbq_next, 1582 &phba->lpfc_iocb_list, list) { 1583 spin_lock_irq(phba->host->host_lock); 1584 kfree(iocbq_entry); 1585 phba->total_iocbq_bufs--; 1586 spin_unlock_irq(phba->host->host_lock); 1587 } 1588 lpfc_mem_free(phba); 1589 out_free_slim: 1590 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p, 1591 phba->slim2p_mapping); 1592 out_iounmap: 1593 iounmap(phba->ctrl_regs_memmap_p); 1594 iounmap(phba->slim_memmap_p); 1595 out_idr_remove: 1596 idr_remove(&lpfc_hba_index, phba->brd_no); 1597 out_put_host: 1598 
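
	/*
	 * Error unwind: like the labels above, the ones from here down
	 * fall through in reverse order of setup; only the Scsi_Host
	 * reference, the PCI regions and the device enable are left to
	 * release.
	 */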
scsi_host_put(host); 1599 out_release_regions: 1600 pci_release_regions(pdev); 1601 out_disable_device: 1602 pci_disable_device(pdev); 1603 out: 1604 return error; 1605 } 1606 1607 static void __devexit 1608 lpfc_pci_remove_one(struct pci_dev *pdev) 1609 { 1610 struct Scsi_Host *host = pci_get_drvdata(pdev); 1611 struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata[0]; 1612 unsigned long iflag; 1613 1614 lpfc_free_sysfs_attr(phba); 1615 1616 spin_lock_irqsave(phba->host->host_lock, iflag); 1617 phba->fc_flag |= FC_UNLOADING; 1618 1619 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1620 1621 fc_remove_host(phba->host); 1622 scsi_remove_host(phba->host); 1623 1624 kthread_stop(phba->worker_thread); 1625 1626 /* 1627 * Bring down the SLI Layer. This step disable all interrupts, 1628 * clears the rings, discards all mailbox commands, and resets 1629 * the HBA. 1630 */ 1631 lpfc_sli_hba_down(phba); 1632 1633 /* Release the irq reservation */ 1634 free_irq(phba->pcidev->irq, phba); 1635 1636 lpfc_cleanup(phba, 0); 1637 lpfc_stop_timer(phba); 1638 phba->work_hba_events = 0; 1639 1640 /* 1641 * Call scsi_free before mem_free since scsi bufs are released to their 1642 * corresponding pools here. 1643 */ 1644 lpfc_scsi_free(phba); 1645 lpfc_mem_free(phba); 1646 1647 /* Free resources associated with SLI2 interface */ 1648 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 1649 phba->slim2p, phba->slim2p_mapping); 1650 1651 /* unmap adapter SLIM and Control Registers */ 1652 iounmap(phba->ctrl_regs_memmap_p); 1653 iounmap(phba->slim_memmap_p); 1654 1655 pci_release_regions(phba->pcidev); 1656 pci_disable_device(phba->pcidev); 1657 1658 idr_remove(&lpfc_hba_index, phba->brd_no); 1659 scsi_host_put(phba->host); 1660 1661 pci_set_drvdata(pdev, NULL); 1662 } 1663 1664 static struct pci_device_id lpfc_id_table[] = { 1665 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, 1666 PCI_ANY_ID, PCI_ANY_ID, }, 1667 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY, 1668 PCI_ANY_ID, PCI_ANY_ID, }, 1669 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR, 1670 PCI_ANY_ID, PCI_ANY_ID, }, 1671 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS, 1672 PCI_ANY_ID, PCI_ANY_ID, }, 1673 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR, 1674 PCI_ANY_ID, PCI_ANY_ID, }, 1675 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY, 1676 PCI_ANY_ID, PCI_ANY_ID, }, 1677 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY, 1678 PCI_ANY_ID, PCI_ANY_ID, }, 1679 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY, 1680 PCI_ANY_ID, PCI_ANY_ID, }, 1681 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY, 1682 PCI_ANY_ID, PCI_ANY_ID, }, 1683 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS, 1684 PCI_ANY_ID, PCI_ANY_ID, }, 1685 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID, 1686 PCI_ANY_ID, PCI_ANY_ID, }, 1687 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB, 1688 PCI_ANY_ID, PCI_ANY_ID, }, 1689 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR, 1690 PCI_ANY_ID, PCI_ANY_ID, }, 1691 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID, 1692 PCI_ANY_ID, PCI_ANY_ID, }, 1693 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB, 1694 PCI_ANY_ID, PCI_ANY_ID, }, 1695 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY, 1696 PCI_ANY_ID, PCI_ANY_ID, }, 1697 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101, 1698 PCI_ANY_ID, PCI_ANY_ID, }, 1699 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S, 1700 PCI_ANY_ID, PCI_ANY_ID, }, 1701 { 0 } 1702 }; 1703 1704 MODULE_DEVICE_TABLE(pci, lpfc_id_table); 1705 1706 static struct pci_driver lpfc_driver = { 1707 .name = LPFC_DRIVER_NAME, 1708 .id_table = lpfc_id_table, 1709 .probe = lpfc_pci_probe_one, 1710 .remove = 
__devexit_p(lpfc_pci_remove_one),
};

static int __init
lpfc_init(void)
{
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	lpfc_transport_template =
		fc_attach_transport(&lpfc_transport_functions);
	if (!lpfc_transport_template)
		return -ENOMEM;
	error = pci_register_driver(&lpfc_driver);
	if (error)
		fc_release_transport(lpfc_transport_template);

	return error;
}

static void __exit
lpfc_exit(void)
{
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
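
/*
 * Module bring-up and tear-down order: lpfc_init() attaches the FC
 * transport template before registering the PCI driver, and drops the
 * template again if registration fails; lpfc_exit() unregisters the
 * driver first and then releases the transport template, the reverse
 * of bring-up.
 */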