/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep: Perform lpfc initialization prior to config port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
        lpfc_vpd_t *vp = &phba->vpd;
        int i = 0, rc;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *mb;
        char *lpfc_vpd_data = NULL;
        uint16_t offset = 0;
        static char licensed[56] =
                    "key unlock for use with gnu public licensed code only\0";
        static int init_key = 1;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }

        mb = &pmb->mb;
        phba->link_state = LPFC_INIT_MBX_CMDS;

        if (lpfc_is_LC_HBA(phba->pcidev->device)) {
                if (init_key) {
                        uint32_t *ptext = (uint32_t *) licensed;

                        for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
                                *ptext = cpu_to_be32(*ptext);
                        init_key = 0;
                }

                lpfc_read_nv(phba, pmb);
                memset((char*)mb->un.varRDnvp.rsvd3, 0,
                        sizeof (mb->un.varRDnvp.rsvd3));
                memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
                        sizeof (licensed));

                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
                                        "0324 Config Port initialization "
                                        "error, mbxCmd x%x READ_NVPARM, "
                                        "mbxStatus x%x\n",
                                        mb->mbxCommand, mb->mbxStatus);
                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -ERESTART;
                }
                memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
                       sizeof(phba->wwnn));
                memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
                       sizeof(phba->wwpn));
        }

        phba->sli3_options = 0x0;

        /* Setup and issue mailbox READ REV command */
        lpfc_read_rev(phba, pmb);
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
        if (rc != MBX_SUCCESS) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0439 Adapter failed to init, mbxCmd x%x "
                                "READ_REV, mbxStatus x%x\n",
                                mb->mbxCommand, mb->mbxStatus);
                mempool_free(pmb, phba->mbox_mem_pool);
                return -ERESTART;
        }


        /*
         * The value of rr must be 1 since the driver sets the cv field to 1.
         * This setting requires the FW to set all revision fields.
         */
        if (mb->un.varRdRev.rr == 0) {
                vp->rev.rBit = 0;
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0440 Adapter failed to init, READ_REV has "
                                "missing revision information.\n");
                mempool_free(pmb, phba->mbox_mem_pool);
                return -ERESTART;
        }

        if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
                mempool_free(pmb, phba->mbox_mem_pool);
                return -EINVAL;
        }

        /* Save information as VPD data */
        vp->rev.rBit = 1;
        memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
        vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
        memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
        vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
        memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
        vp->rev.biuRev = mb->un.varRdRev.biuRev;
        vp->rev.smRev = mb->un.varRdRev.smRev;
        vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
        vp->rev.endecRev = mb->un.varRdRev.endecRev;
        vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
        vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
        vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
        vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
        vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
        vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

        /* If the sli feature level is less than 9, we must
         * tear down all RPIs and VPIs on link down if NPIV
         * is enabled.
         */
        if (vp->rev.feaLevelHigh < 9)
                phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

        if (lpfc_is_LC_HBA(phba->pcidev->device))
                memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
                                                sizeof (phba->RandomData));

        /* Get adapter VPD information */
        lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
        if (!lpfc_vpd_data)
                goto out_free_mbox;

        do {
                lpfc_dump_mem(phba, pmb, offset);
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                        "0441 VPD not present on adapter, "
                                        "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
                                        mb->mbxCommand, mb->mbxStatus);
                        mb->un.varDmp.word_cnt = 0;
                }
                if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
                        mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
                lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
                                      lpfc_vpd_data + offset,
                                      mb->un.varDmp.word_cnt);
                offset += mb->un.varDmp.word_cnt;
        } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
        lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

        kfree(lpfc_vpd_data);
out_free_mbox:
        mempool_free(pmb, phba->mbox_mem_pool);
        return 0;
}
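
/*
 * A note on the VPD dump loop above (descriptive only, not normative):
 * the DUMP_MEM mailbox returns the VPD image in chunks, and each reply
 * reports the chunk size in un.varDmp.word_cnt.  The loop therefore
 * terminates either when the adapter returns a zero-length chunk or when
 * DMP_VPD_SIZE bytes have been accumulated, i.e. conceptually:
 *
 *      offset = 0;
 *      do {
 *              read chunk at offset;
 *              offset += chunk length;
 *      } while (chunk length != 0 && offset < DMP_VPD_SIZE);
 */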

/**
 * lpfc_config_async_cmpl: Completion handler for config async event mbox cmd.
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's configuring asynchronous
 * event mailbox command to the device. If the mailbox command returns
 * successfully, it will set the internal async event support flag to 1;
 * otherwise, it will set the internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
        if (pmboxq->mb.mbxStatus == MBX_SUCCESS)
                phba->temp_sensor_support = 1;
        else
                phba->temp_sensor_support = 0;
        mempool_free(pmboxq, phba->mbox_mem_pool);
        return;
}

/**
 * lpfc_dump_wakeup_param_cmpl: Completion handler for dump memory mailbox
 *   command used for getting wake up parameters.
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
        struct prog_id *prg;
        uint32_t prog_id_word;
        char dist = ' ';
        /* character array used for decoding dist type. */
        char dist_char[] = "nabx";

        if (pmboxq->mb.mbxStatus != MBX_SUCCESS) {
                mempool_free(pmboxq, phba->mbox_mem_pool);
                return;
        }

        prg = (struct prog_id *) &prog_id_word;

        /* word 7 contains the option rom version */
        prog_id_word = pmboxq->mb.un.varWords[7];

        /* Decode the Option rom version word to a readable string */
        if (prg->dist < 4)
                dist = dist_char[prg->dist];

        if ((prg->dist == 3) && (prg->num == 0))
                sprintf(phba->OptionROMVersion, "%d.%d%d",
                        prg->ver, prg->rev, prg->lev);
        else
                sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
                        prg->ver, prg->rev, prg->lev,
                        dist, prg->num);
        mempool_free(pmboxq, phba->mbox_mem_pool);
        return;
}
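
/*
 * Illustration of the decode above (hypothetical field values): with
 * ver=5, rev=0, lev=2, dist=1 ('a') and num=3, the sprintf yields the
 * string "5.02a3"; when dist==3 and num==0 the distribution suffix is
 * omitted, giving e.g. "5.02".
 */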

/**
 * lpfc_config_port_post: Perform lpfc initialization after config port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport = phba->pport;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *mb;
        struct lpfc_dmabuf *mp;
        struct lpfc_sli *psli = &phba->sli;
        uint32_t status, timeout;
        int i, j;
        int rc;

        spin_lock_irq(&phba->hbalock);
        /*
         * If the Config port completed correctly the HBA is not
         * overheated any more.
         */
        if (phba->over_temp_state == HBA_OVER_TEMP)
                phba->over_temp_state = HBA_NORMAL_TEMP;
        spin_unlock_irq(&phba->hbalock);

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }
        mb = &pmb->mb;

        /* Get login parameters for NID. */
        lpfc_read_sparam(phba, pmb, 0);
        pmb->vport = vport;
        if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0448 Adapter failed init, mbxCmd x%x "
                                "READ_SPARM mbxStatus x%x\n",
                                mb->mbxCommand, mb->mbxStatus);
                phba->link_state = LPFC_HBA_ERROR;
                mp = (struct lpfc_dmabuf *) pmb->context1;
                mempool_free(pmb, phba->mbox_mem_pool);
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
                return -EIO;
        }

        mp = (struct lpfc_dmabuf *) pmb->context1;

        memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        pmb->context1 = NULL;

        if (phba->cfg_soft_wwnn)
                u64_to_wwn(phba->cfg_soft_wwnn,
                           vport->fc_sparam.nodeName.u.wwn);
        if (phba->cfg_soft_wwpn)
                u64_to_wwn(phba->cfg_soft_wwpn,
                           vport->fc_sparam.portName.u.wwn);
        memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
               sizeof (struct lpfc_name));
        memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
               sizeof (struct lpfc_name));
        /* If no serial number in VPD data, use low 6 bytes of WWNN */
        /* This should be consolidated into parse_vpd ? - mr */
        if (phba->SerialNumber[0] == 0) {
                uint8_t *outptr;

                outptr = &vport->fc_nodename.u.s.IEEE[0];
                for (i = 0; i < 12; i++) {
                        status = *outptr++;
                        j = ((status & 0xf0) >> 4);
                        if (j <= 9)
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x30 + (uint8_t) j);
                        else
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
                        i++;
                        j = (status & 0xf);
                        if (j <= 9)
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x30 + (uint8_t) j);
                        else
                                phba->SerialNumber[i] =
                                    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
                }
        }
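
        /*
         * Illustration of the fallback serial number above (hypothetical
         * WWNN): each nibble of the 6 IEEE bytes maps to '0'-'9'
         * (0x30 + j) or 'a'-'f' (0x61 + j - 10), so IEEE bytes
         * 00:00:c9:12:34:ab would yield the 12-character serial
         * "0000c91234ab".
         */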

        lpfc_read_config(phba, pmb);
        pmb->vport = vport;
        if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0453 Adapter failed to init, mbxCmd x%x "
                                "READ_CONFIG, mbxStatus x%x\n",
                                mb->mbxCommand, mb->mbxStatus);
                phba->link_state = LPFC_HBA_ERROR;
                mempool_free(pmb, phba->mbox_mem_pool);
                return -EIO;
        }

        /* Reset the DFT_HBA_Q_DEPTH to the max xri */
        if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
                phba->cfg_hba_queue_depth =
                        mb->un.varRdConfig.max_xri + 1;

        phba->lmt = mb->un.varRdConfig.lmt;

        /* Get the default values for Model Name and Description */
        lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

        if ((phba->cfg_link_speed > LINK_SPEED_10G)
            || ((phba->cfg_link_speed == LINK_SPEED_1G)
                && !(phba->lmt & LMT_1Gb))
            || ((phba->cfg_link_speed == LINK_SPEED_2G)
                && !(phba->lmt & LMT_2Gb))
            || ((phba->cfg_link_speed == LINK_SPEED_4G)
                && !(phba->lmt & LMT_4Gb))
            || ((phba->cfg_link_speed == LINK_SPEED_8G)
                && !(phba->lmt & LMT_8Gb))
            || ((phba->cfg_link_speed == LINK_SPEED_10G)
                && !(phba->lmt & LMT_10Gb))) {
                /* Reset link speed to auto */
                lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
                                "1302 Invalid speed for this board: "
                                "Reset link speed to auto: x%x\n",
                                phba->cfg_link_speed);
                phba->cfg_link_speed = LINK_SPEED_AUTO;
        }

        phba->link_state = LPFC_LINK_DOWN;

        /* Only process IOCBs on ELS ring till hba_state is READY */
        if (psli->ring[psli->extra_ring].cmdringaddr)
                psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
        if (psli->ring[psli->fcp_ring].cmdringaddr)
                psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
        if (psli->ring[psli->next_ring].cmdringaddr)
                psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

        /* Post receive buffers for desired rings */
        if (phba->sli_rev != 3)
                lpfc_post_rcv_buf(phba);

        /*
         * Configure HBA MSI-X attention conditions to messages if MSI-X
         * mode is enabled.
         */
        if (phba->intr_type == MSIX) {
                rc = lpfc_config_msi(phba, pmb);
                if (rc) {
                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -EIO;
                }
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
                                        "0352 Config MSI mailbox command "
                                        "failed, mbxCmd x%x, mbxStatus x%x\n",
                                        pmb->mb.mbxCommand, pmb->mb.mbxStatus);
                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -EIO;
                }
        }

        /* Initialize ERATT handling flag */
        phba->hba_flag &= ~HBA_ERATT_HANDLED;

        /* Enable appropriate host interrupts */
        spin_lock_irq(&phba->hbalock);
        status = readl(phba->HCregaddr);
        status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
        if (psli->num_rings > 0)
                status |= HC_R0INT_ENA;
        if (psli->num_rings > 1)
                status |= HC_R1INT_ENA;
        if (psli->num_rings > 2)
                status |= HC_R2INT_ENA;
        if (psli->num_rings > 3)
                status |= HC_R3INT_ENA;

        if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
            (phba->cfg_poll & DISABLE_FCP_RING_INT))
                status &= ~(HC_R0INT_ENA);

        writel(status, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
        spin_unlock_irq(&phba->hbalock);

        /* Set up ring-0 (ELS) timer */
        timeout = phba->fc_ratov * 2;
        mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
        /* Set up heart beat (HB) timer */
        mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
        phba->hb_outstanding = 0;
        phba->last_completion_time = jiffies;
        /* Set up error attention (ERATT) polling timer */
        mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);

        lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
        pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
        lpfc_set_loopback_flag(phba);
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
        if (rc != MBX_SUCCESS) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0454 Adapter failed to init, mbxCmd x%x "
                                "INIT_LINK, mbxStatus x%x\n",
                                mb->mbxCommand, mb->mbxStatus);

                /* Clear all interrupt enable conditions */
                writel(0, phba->HCregaddr);
                readl(phba->HCregaddr); /* flush */
                /* Clear all pending interrupts */
                writel(0xffffffff, phba->HAregaddr);
                readl(phba->HAregaddr); /* flush */

                phba->link_state = LPFC_HBA_ERROR;
                if (rc != MBX_BUSY)
                        mempool_free(pmb, phba->mbox_mem_pool);
                return -EIO;
        }
        /* MBOX buffer will be freed in mbox compl */
        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        lpfc_config_async(phba, pmb, LPFC_ELS_RING);
        pmb->mbox_cmpl = lpfc_config_async_cmpl;
        pmb->vport = phba->pport;
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

        if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
                lpfc_printf_log(phba,
                                KERN_ERR,
                                LOG_INIT,
                                "0456 Adapter failed to issue "
                                "ASYNCEVT_ENABLE mbox status x%x\n",
                                rc);
                mempool_free(pmb, phba->mbox_mem_pool);
        }

        /* Get Option rom version */
        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        lpfc_dump_wakeup_param(phba, pmb);
        pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
        pmb->vport = phba->pport;
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

        if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0435 Adapter failed to get Option ROM "
                                "version status x%x\n", rc);
                mempool_free(pmb, phba->mbox_mem_pool);
        }

        return 0;
}

/**
 * lpfc_hba_down_prep: Perform lpfc uninitialization prior to HBA reset.
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
        struct lpfc_vport **vports;
        int i;
        /* Disable interrupts */
        writel(0, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */

        if (phba->pport->load_flag & FC_UNLOADING)
                lpfc_cleanup_discovery_resources(phba->pport);
        else {
                vports = lpfc_create_vport_work_array(phba);
                if (vports != NULL)
                        for (i = 0; i <= phba->max_vpi &&
                                    vports[i] != NULL; i++)
                                lpfc_cleanup_discovery_resources(vports[i]);
                lpfc_destroy_vport_work_array(phba, vports);
        }
        return 0;
}
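
/*
 * A note on the vport iteration idiom used above and repeated throughout
 * this file (descriptive only): lpfc_create_vport_work_array() returns a
 * NULL-terminated snapshot of the active vports, traversed as
 *
 *      vports = lpfc_create_vport_work_array(phba);
 *      if (vports != NULL)
 *              for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
 *                      ... operate on vports[i] ...
 *      lpfc_destroy_vport_work_array(phba, vports);
 *
 * and lpfc_destroy_vport_work_array() must always be paired with the
 * create call to release the snapshot.
 */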

/**
 * lpfc_hba_down_post: Perform lpfc uninitialization after HBA reset.
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_sli_ring *pring;
        struct lpfc_dmabuf *mp, *next_mp;
        struct lpfc_iocbq *iocb;
        IOCB_t *cmd = NULL;
        LIST_HEAD(completions);
        int i;

        if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
                lpfc_sli_hbqbuf_free_all(phba);
        else {
                /* Cleanup preposted buffers on the ELS ring */
                pring = &psli->ring[LPFC_ELS_RING];
                list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
                        list_del(&mp->list);
                        pring->postbufq_cnt--;
                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
                        kfree(mp);
                }
        }

        spin_lock_irq(&phba->hbalock);
        for (i = 0; i < psli->num_rings; i++) {
                pring = &psli->ring[i];

                /* At this point in time the HBA is either reset or DOA. Either
                 * way, nothing should be on txcmplq as it will NEVER complete.
                 */
                list_splice_init(&pring->txcmplq, &completions);
                pring->txcmplq_cnt = 0;
                spin_unlock_irq(&phba->hbalock);

                while (!list_empty(&completions)) {
                        iocb = list_get_first(&completions, struct lpfc_iocbq,
                                              list);
                        cmd = &iocb->iocb;
                        list_del_init(&iocb->list);

                        if (!iocb->iocb_cmpl)
                                lpfc_sli_release_iocbq(phba, iocb);
                        else {
                                cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
                                cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
                                (iocb->iocb_cmpl) (phba, iocb, iocb);
                        }
                }

                lpfc_sli_abort_iocb_ring(phba, pring);
                spin_lock_irq(&phba->hbalock);
        }
        spin_unlock_irq(&phba->hbalock);

        return 0;
}
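
/*
 * A note on the txcmplq flush above (descriptive only): iocbs that still
 * carry a completion handler are failed back to their owner by faking a
 * completion with ulpStatus = IOSTAT_LOCAL_REJECT and word 4 set to
 * IOERR_SLI_ABORTED, which the upper layers treat as a retryable error;
 * handler-less iocbs are simply returned to the free pool.
 */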

/**
 * lpfc_hb_timeout: The HBA-timer timeout handler.
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
        struct lpfc_hba *phba;
        uint32_t tmo_posted;
        unsigned long iflag;

        phba = (struct lpfc_hba *)ptr;

        /* Check for heart beat timeout conditions */
        spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
        tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
        if (!tmo_posted)
                phba->pport->work_port_events |= WORKER_HB_TMO;
        spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

        /* Tell the worker thread there is work to do */
        if (!tmo_posted)
                lpfc_worker_wake_up(phba);
        return;
}

/**
 * lpfc_hb_mbox_cmpl: The lpfc heart-beat mailbox command callback function.
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time
 * the heart-beat mailbox command is issued, the driver shall set up the
 * heart-beat timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds
 * and mark the heart-beat outstanding state. Once the mailbox command comes
 * back and no error conditions are detected, the heart-beat mailbox command
 * timer is reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat
 * outstanding state is cleared for the next heart-beat. If the timer
 * expires with the heart-beat outstanding state set, the driver will put
 * the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
        unsigned long drvr_flag;

        spin_lock_irqsave(&phba->hbalock, drvr_flag);
        phba->hb_outstanding = 0;
        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

        /* Check and reset heart-beat timer if necessary */
        mempool_free(pmboxq, phba->mbox_mem_pool);
        if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
            !(phba->link_state == LPFC_HBA_ERROR) &&
            !(phba->pport->load_flag & FC_UNLOADING))
                mod_timer(&phba->hb_tmofunc,
                          jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
        return;
}
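
/*
 * Heart-beat timing, summarized from the handlers above and below
 * (illustrative; the interval/timeout constants are defined elsewhere in
 * the driver headers):
 *
 *      idle:       timer armed for LPFC_HB_MBOX_INTERVAL (5s)
 *      tick, recent I/O completion seen:
 *                  re-arm only (5s, or 30s if a heart-beat is already
 *                  outstanding); no mailbox is sent
 *      tick, idle and heartbeat configured:
 *                  issue the heart-beat mailbox (lpfc_heart_beat()),
 *                  set hb_outstanding, re-arm for LPFC_HB_MBOX_TIMEOUT (30s)
 *      completion: clear hb_outstanding, re-arm for 5s
 *      tick with hb_outstanding still set:
 *                  adapter is unresponsive; take the port offline
 */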

/**
 * lpfc_hb_timeout_handler: The HBA-timer timeout handler.
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * a periodic event has already been attended to either in the interrupt
 * handler or by processing slow-ring or fast-ring events within the
 * HBA-timer timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply
 * resets the timer for the next timeout period. If the lpfc heart-beat
 * mailbox command is configured and there is no heart-beat mailbox command
 * outstanding, a heart-beat mailbox is issued and the timer set properly.
 * Otherwise, if there has been a heart-beat mailbox command outstanding,
 * the HBA shall be taken offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
        LPFC_MBOXQ_t *pmboxq;
        struct lpfc_dmabuf *buf_ptr;
        int retval;
        struct lpfc_sli *psli = &phba->sli;
        LIST_HEAD(completions);

        if ((phba->link_state == LPFC_HBA_ERROR) ||
            (phba->pport->load_flag & FC_UNLOADING) ||
            (phba->pport->fc_flag & FC_OFFLINE_MODE))
                return;

        spin_lock_irq(&phba->pport->work_port_lock);

        if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
                       jiffies)) {
                spin_unlock_irq(&phba->pport->work_port_lock);
                if (!phba->hb_outstanding)
                        mod_timer(&phba->hb_tmofunc,
                                  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
                else
                        mod_timer(&phba->hb_tmofunc,
                                  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
                return;
        }
        spin_unlock_irq(&phba->pport->work_port_lock);

        if (phba->elsbuf_cnt &&
            (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
                spin_lock_irq(&phba->hbalock);
                list_splice_init(&phba->elsbuf, &completions);
                phba->elsbuf_cnt = 0;
                phba->elsbuf_prev_cnt = 0;
                spin_unlock_irq(&phba->hbalock);

                while (!list_empty(&completions)) {
                        list_remove_head(&completions, buf_ptr,
                                         struct lpfc_dmabuf, list);
                        lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
                        kfree(buf_ptr);
                }
        }
        phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

        /* If there is no heart beat outstanding, issue a heartbeat command */
        if (phba->cfg_enable_hba_heartbeat) {
                if (!phba->hb_outstanding) {
                        pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
                        if (!pmboxq) {
                                mod_timer(&phba->hb_tmofunc,
                                          jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
                                return;
                        }

                        lpfc_heart_beat(phba, pmboxq);
                        pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
                        pmboxq->vport = phba->pport;
                        retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

                        if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
                                mempool_free(pmboxq, phba->mbox_mem_pool);
                                mod_timer(&phba->hb_tmofunc,
                                          jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
                                return;
                        }
                        mod_timer(&phba->hb_tmofunc,
                                  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
                        phba->hb_outstanding = 1;
                        return;
                } else {
                        /*
                         * If heart beat timeout called with hb_outstanding set
                         * we need to take the HBA offline.
                         */
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "0459 Adapter heartbeat failure, "
                                        "taking this port offline.\n");

                        spin_lock_irq(&phba->hbalock);
                        psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
                        spin_unlock_irq(&phba->hbalock);

                        lpfc_offline_prep(phba);
                        lpfc_offline(phba);
                        lpfc_unblock_mgmt_io(phba);
                        phba->link_state = LPFC_HBA_ERROR;
                        lpfc_hba_down_post(phba);
                }
        }
}

/**
 * lpfc_offline_eratt: Bring lpfc offline on hardware error attention.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;

        spin_lock_irq(&phba->hbalock);
        psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
        spin_unlock_irq(&phba->hbalock);
        lpfc_offline_prep(phba);

        lpfc_offline(phba);
        lpfc_reset_barrier(phba);
        lpfc_sli_brdreset(phba);
        lpfc_hba_down_post(phba);
        lpfc_sli_brdready(phba, HS_MBRDY);
        lpfc_unblock_mgmt_io(phba);
        phba->link_state = LPFC_HBA_ERROR;
        return;
}

/**
 * lpfc_handle_eratt: The HBA hardware error handler.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport = phba->pport;
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_sli_ring *pring;
        uint32_t event_data;
        unsigned long temperature;
        struct temp_event temp_event_data;
        struct Scsi_Host *shost;
        struct lpfc_board_event_header board_event;

        /* If the pci channel is offline, ignore possible errors,
         * since we cannot communicate with the pci card anyway.
         */
        if (pci_channel_offline(phba->pcidev))
                return;
        /* If resets are disabled then leave the HBA alone and return */
        if (!phba->cfg_enable_hba_reset)
                return;

        /* Send an internal error event to mgmt application */
        board_event.event_type = FC_REG_BOARD_EVENT;
        board_event.subcategory = LPFC_EVENT_PORTINTERR;
        shost = lpfc_shost_from_vport(phba->pport);
        fc_host_post_vendor_event(shost, fc_get_event_number(),
                                  sizeof(board_event),
                                  (char *) &board_event,
                                  LPFC_NL_VENDOR_ID);

        if (phba->work_hs & HS_FFER6) {
                /* Re-establishing Link */
                lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
                                "1301 Re-establishing Link "
                                "Data: x%x x%x x%x\n",
                                phba->work_hs,
                                phba->work_status[0], phba->work_status[1]);

                spin_lock_irq(&phba->hbalock);
                psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
                spin_unlock_irq(&phba->hbalock);

                /*
                 * Firmware stops when it triggers an error attention with
                 * HS_FFER6. That could cause I/Os to be dropped by the
                 * firmware. Error out the iocbs (I/Os) on the txcmplq and
                 * let the SCSI layer retry them after re-establishing link.
                 */
                pring = &psli->ring[psli->fcp_ring];
                lpfc_sli_abort_iocb_ring(phba, pring);

                /*
                 * There was a firmware error. Take the hba offline and then
                 * attempt to restart it.
                 */
                lpfc_offline_prep(phba);
                lpfc_offline(phba);
                lpfc_sli_brdrestart(phba);
                if (lpfc_online(phba) == 0) {   /* Initialize the HBA */
                        lpfc_unblock_mgmt_io(phba);
                        return;
                }
                lpfc_unblock_mgmt_io(phba);
        } else if (phba->work_hs & HS_CRIT_TEMP) {
                temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
                temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
                temp_event_data.event_code = LPFC_CRIT_TEMP;
                temp_event_data.data = (uint32_t)temperature;

                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0406 Adapter maximum temperature exceeded "
                                "(%ld), taking this port offline "
                                "Data: x%x x%x x%x\n",
                                temperature, phba->work_hs,
                                phba->work_status[0], phba->work_status[1]);

                shost = lpfc_shost_from_vport(phba->pport);
                fc_host_post_vendor_event(shost, fc_get_event_number(),
                                          sizeof(temp_event_data),
                                          (char *) &temp_event_data,
                                          SCSI_NL_VID_TYPE_PCI
                                          | PCI_VENDOR_ID_EMULEX);

                spin_lock_irq(&phba->hbalock);
                phba->over_temp_state = HBA_OVER_TEMP;
                spin_unlock_irq(&phba->hbalock);
                lpfc_offline_eratt(phba);

        } else {
                /* The if clause above forces this code path when the status
                 * failure is a value other than FFER6. Do not call the offline
                 * twice. This is the adapter hardware error path.
                 */
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0457 Adapter Hardware Error "
                                "Data: x%x x%x x%x\n",
                                phba->work_hs,
                                phba->work_status[0], phba->work_status[1]);

                event_data = FC_REG_DUMP_EVENT;
                shost = lpfc_shost_from_vport(vport);
                fc_host_post_vendor_event(shost, fc_get_event_number(),
                                          sizeof(event_data),
                                          (char *) &event_data,
                                          SCSI_NL_VID_TYPE_PCI
                                          | PCI_VENDOR_ID_EMULEX);

                lpfc_offline_eratt(phba);
        }
        return;
}
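
/*
 * A note on the vendor-event posting pattern used above (descriptive
 * only): each netlink event is a fixed payload handed to the FC
 * transport, e.g.
 *
 *      fc_host_post_vendor_event(shost, fc_get_event_number(),
 *                                sizeof(payload), (char *)&payload,
 *                                SCSI_NL_VID_TYPE_PCI |
 *                                PCI_VENDOR_ID_EMULEX);
 *
 * where the payload is a struct (or a plain 32-bit event code) that
 * user-space management applications decode by event type/subcategory.
 */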

/**
 * lpfc_handle_latt: The HBA link event handler.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport = phba->pport;
        struct lpfc_sli *psli = &phba->sli;
        LPFC_MBOXQ_t *pmb;
        volatile uint32_t control;
        struct lpfc_dmabuf *mp;
        int rc = 0;

        pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb) {
                rc = 1;
                goto lpfc_handle_latt_err_exit;
        }

        mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
        if (!mp) {
                rc = 2;
                goto lpfc_handle_latt_free_pmb;
        }

        mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
        if (!mp->virt) {
                rc = 3;
                goto lpfc_handle_latt_free_mp;
        }

        /* Cleanup any outstanding ELS commands */
        lpfc_els_flush_all_cmd(phba);

        psli->slistat.link_event++;
        lpfc_read_la(phba, pmb, mp);
        pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
        pmb->vport = vport;
        /* Block ELS IOCBs until we have processed this mbox command */
        phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
        if (rc == MBX_NOT_FINISHED) {
                rc = 4;
                goto lpfc_handle_latt_free_mbuf;
        }

        /* Clear Link Attention in HA REG */
        spin_lock_irq(&phba->hbalock);
        writel(HA_LATT, phba->HAregaddr);
        readl(phba->HAregaddr); /* flush */
        spin_unlock_irq(&phba->hbalock);

        return;

lpfc_handle_latt_free_mbuf:
        phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
        kfree(mp);
lpfc_handle_latt_free_pmb:
        mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
        /* Enable Link attention interrupts */
        spin_lock_irq(&phba->hbalock);
        psli->sli_flag |= LPFC_PROCESS_LA;
        control = readl(phba->HCregaddr);
        control |= HC_LAINT_ENA;
        writel(control, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */

        /* Clear Link Attention in HA REG */
        writel(HA_LATT, phba->HAregaddr);
        readl(phba->HAregaddr); /* flush */
        spin_unlock_irq(&phba->hbalock);
        lpfc_linkdown(phba);
        phba->link_state = LPFC_HBA_ERROR;

        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
                        "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

        return;
}
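
/*
 * VPD layout assumed by lpfc_parse_vpd() below (standard PCI VPD tags,
 * summarized here for reference): the image is a sequence of resources,
 * each starting with a tag byte followed by a little-endian 16-bit length:
 *
 *      0x82, 0x91  identifier strings       (skipped)
 *      0x90        read-only data area      (parsed for keywords)
 *      0x78        end tag                  (stops parsing)
 *
 * Inside the 0x90 area each keyword record is two ASCII bytes, one length
 * byte, then data; the driver extracts "SN" (serial number) and the
 * Emulex-specific "V1".."V4" records (model description, model name,
 * program type, port).
 */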

/**
 * lpfc_parse_vpd: Parse VPD (Vital Product Data).
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
static int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
        uint8_t lenlo, lenhi;
        int Length;
        int i, j;
        int finished = 0;
        int index = 0;

        if (!vpd)
                return 0;

        /* Vital Product */
        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                        "0455 Vital Product Data: x%x x%x x%x x%x\n",
                        (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
                        (uint32_t) vpd[3]);
        while (!finished && (index < (len - 4))) {
                switch (vpd[index]) {
                case 0x82:
                case 0x91:
                        index += 1;
                        lenlo = vpd[index];
                        index += 1;
                        lenhi = vpd[index];
                        index += 1;
                        i = ((((unsigned short)lenhi) << 8) + lenlo);
                        index += i;
                        break;
                case 0x90:
                        index += 1;
                        lenlo = vpd[index];
                        index += 1;
                        lenhi = vpd[index];
                        index += 1;
                        Length = ((((unsigned short)lenhi) << 8) + lenlo);
                        if (Length > len - index)
                                Length = len - index;
                        while (Length > 0) {
                                /* Look for Serial Number */
                                if ((vpd[index] == 'S') &&
                                    (vpd[index+1] == 'N')) {
                                        index += 2;
                                        i = vpd[index];
                                        index += 1;
                                        j = 0;
                                        Length -= (3+i);
                                        while (i--) {
                                                phba->SerialNumber[j++] =
                                                        vpd[index++];
                                                if (j == 31)
                                                        break;
                                        }
                                        phba->SerialNumber[j] = 0;
                                        continue;
                                }
                                else if ((vpd[index] == 'V') &&
                                         (vpd[index+1] == '1')) {
                                        phba->vpd_flag |= VPD_MODEL_DESC;
                                        index += 2;
                                        i = vpd[index];
                                        index += 1;
                                        j = 0;
                                        Length -= (3+i);
                                        while (i--) {
                                                phba->ModelDesc[j++] =
                                                        vpd[index++];
                                                if (j == 255)
                                                        break;
                                        }
                                        phba->ModelDesc[j] = 0;
                                        continue;
                                }
                                else if ((vpd[index] == 'V') &&
                                         (vpd[index+1] == '2')) {
                                        phba->vpd_flag |= VPD_MODEL_NAME;
                                        index += 2;
                                        i = vpd[index];
                                        index += 1;
                                        j = 0;
                                        Length -= (3+i);
                                        while (i--) {
                                                phba->ModelName[j++] =
                                                        vpd[index++];
                                                if (j == 79)
                                                        break;
                                        }
                                        phba->ModelName[j] = 0;
                                        continue;
                                }
                                else if ((vpd[index] == 'V') &&
                                         (vpd[index+1] == '3')) {
                                        phba->vpd_flag |= VPD_PROGRAM_TYPE;
                                        index += 2;
                                        i = vpd[index];
                                        index += 1;
                                        j = 0;
                                        Length -= (3+i);
                                        while (i--) {
                                                phba->ProgramType[j++] =
                                                        vpd[index++];
                                                if (j == 255)
                                                        break;
                                        }
                                        phba->ProgramType[j] = 0;
                                        continue;
                                }
                                else if ((vpd[index] == 'V') &&
                                         (vpd[index+1] == '4')) {
                                        phba->vpd_flag |= VPD_PORT;
                                        index += 2;
                                        i = vpd[index];
                                        index += 1;
                                        j = 0;
                                        Length -= (3+i);
                                        while (i--) {
                                                phba->Port[j++] = vpd[index++];
                                                if (j == 19)
                                                        break;
                                        }
                                        phba->Port[j] = 0;
                                        continue;
                                }
                                else {
                                        index += 2;
                                        i = vpd[index];
                                        index += 1;
                                        index += i;
                                        Length -= (3 + i);
                                }
                        }
                        finished = 0;
                        break;
                case 0x78:
                        finished = 1;
                        break;
                default:
                        index++;
                        break;
                }
        }

        return (1);
}

/**
 * lpfc_get_hba_model_desc: Retrieve HBA device model name and description.
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves HBA's description based on its registered PCI device
 * ID. The @descp passed into this function points to an array of 256 chars.
 * It shall be returned with the model name, maximum speed, and the host bus
 * type. The @mdp passed into this function points to an array of 80 chars.
 * When the function returns, the @mdp will be filled with the model name.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
        lpfc_vpd_t *vp;
        uint16_t dev_id = phba->pcidev->device;
        int max_speed;
        int GE = 0;
        struct {
                char *name;
                int max_speed;
                char *bus;
        } m = {"<Unknown>", 0, ""};

        if (mdp && mdp[0] != '\0'
            && descp && descp[0] != '\0')
                return;

        if (phba->lmt & LMT_10Gb)
                max_speed = 10;
        else if (phba->lmt & LMT_8Gb)
                max_speed = 8;
        else if (phba->lmt & LMT_4Gb)
                max_speed = 4;
        else if (phba->lmt & LMT_2Gb)
                max_speed = 2;
        else
                max_speed = 1;

        vp = &phba->vpd;

        switch (dev_id) {
        case PCI_DEVICE_ID_FIREFLY:
                m = (typeof(m)){"LP6000", max_speed, "PCI"};
                break;
        case PCI_DEVICE_ID_SUPERFLY:
                if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
                        m = (typeof(m)){"LP7000", max_speed, "PCI"};
                else
                        m = (typeof(m)){"LP7000E", max_speed, "PCI"};
                break;
        case PCI_DEVICE_ID_DRAGONFLY:
                m = (typeof(m)){"LP8000", max_speed, "PCI"};
                break;
        case PCI_DEVICE_ID_CENTAUR:
                if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
                        m = (typeof(m)){"LP9002", max_speed, "PCI"};
                else
                        m = (typeof(m)){"LP9000", max_speed, "PCI"};
                break;
        case PCI_DEVICE_ID_RFLY:
                m = (typeof(m)){"LP952", max_speed, "PCI"};
                break;
        case PCI_DEVICE_ID_PEGASUS:
                m = (typeof(m)){"LP9802", max_speed, "PCI-X"};
                break;
        case PCI_DEVICE_ID_THOR:
                m = (typeof(m)){"LP10000", max_speed, "PCI-X"};
                break;
        case PCI_DEVICE_ID_VIPER:
                m = (typeof(m)){"LPX1000", max_speed, "PCI-X"};
                break;
        case PCI_DEVICE_ID_PFLY:
                m = (typeof(m)){"LP982", max_speed, "PCI-X"};
                break;
        case PCI_DEVICE_ID_TFLY:
                m = (typeof(m)){"LP1050", max_speed, "PCI-X"};
                break;
        case PCI_DEVICE_ID_HELIOS:
                m = (typeof(m)){"LP11000", max_speed, "PCI-X2"};
                break;
        case PCI_DEVICE_ID_HELIOS_SCSP:
                m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"};
                break;
        case PCI_DEVICE_ID_HELIOS_DCSP:
                m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"};
                break;
        case PCI_DEVICE_ID_NEPTUNE:
                m = (typeof(m)){"LPe1000", max_speed, "PCIe"};
                break;
        case PCI_DEVICE_ID_NEPTUNE_SCSP:
                m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"};
                break;
        case PCI_DEVICE_ID_NEPTUNE_DCSP:
                m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"};
                break;
        case PCI_DEVICE_ID_BMID:
                m = (typeof(m)){"LP1150", max_speed, "PCI-X2"};
                break;
        case PCI_DEVICE_ID_BSMB:
                m = (typeof(m)){"LP111", max_speed, "PCI-X2"};
                break;
        case PCI_DEVICE_ID_ZEPHYR:
                m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
                break;
        case PCI_DEVICE_ID_ZEPHYR_SCSP:
                m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
                break;
        case PCI_DEVICE_ID_ZEPHYR_DCSP:
                m = (typeof(m)){"LPe11002-SP", max_speed, "PCIe"};
                break;
        case PCI_DEVICE_ID_ZMID:
                m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
                break;
        case PCI_DEVICE_ID_ZSMB:
                m = (typeof(m)){"LPe111", max_speed, "PCIe"};
                break;
        case PCI_DEVICE_ID_LP101:
                m = (typeof(m)){"LP101", max_speed, "PCI-X"};
                break;
        case PCI_DEVICE_ID_LP10000S:
                m = (typeof(m)){"LP10000-S", max_speed, "PCI"};
                break;
        case PCI_DEVICE_ID_LP11000S:
                m = (typeof(m)){"LP11000-S", max_speed, "PCI-X2"};
                break;
        case PCI_DEVICE_ID_LPE11000S:
                m = (typeof(m)){"LPe11000-S", max_speed, "PCIe"};
                break;
        case PCI_DEVICE_ID_SAT:
                m = (typeof(m)){"LPe12000", max_speed, "PCIe"};
                break;
        case PCI_DEVICE_ID_SAT_MID:
                m = (typeof(m)){"LPe1250", max_speed, "PCIe"};
                break;
        case PCI_DEVICE_ID_SAT_SMB:
                m = (typeof(m)){"LPe121", max_speed, "PCIe"};
                break;
        case PCI_DEVICE_ID_SAT_DCSP:
                m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"};
                break;
        case PCI_DEVICE_ID_SAT_SCSP:
                m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"};
                break;
        case PCI_DEVICE_ID_SAT_S:
                m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"};
                break;
        case PCI_DEVICE_ID_HORNET:
                m = (typeof(m)){"LP21000", max_speed, "PCIe"};
                GE = 1;
                break;
        case PCI_DEVICE_ID_PROTEUS_VF:
                m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
                break;
        case PCI_DEVICE_ID_PROTEUS_PF:
                m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
                break;
        case PCI_DEVICE_ID_PROTEUS_S:
                m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"};
                break;
        default:
                m = (typeof(m)){ NULL };
                break;
        }

        if (mdp && mdp[0] == '\0')
                snprintf(mdp, 79, "%s", m.name);
        if (descp && descp[0] == '\0')
                snprintf(descp, 255,
                         "Emulex %s %d%s %s %s",
                         m.name, m.max_speed,
                         (GE) ? "GE" : "Gb",
                         m.bus,
                         (GE) ? "FCoE Adapter" : "Fibre Channel Adapter");
}
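
/*
 * Illustration (hypothetical adapter): for a ZEPHYR-family device with
 * LMT_4Gb set, the snprintf calls above yield mdp = "LPe11000" and
 * descp = "Emulex LPe11000 4Gb PCIe Fibre Channel Adapter"; for the
 * HORNET (GE = 1) the suffixes become "GE" and "FCoE Adapter" instead.
 */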

/**
 * lpfc_post_buffer: Post IOCB(s) with DMA buffer descriptor(s) to an IOCB
 *   ring.
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to an IOCB ring.
 * @cnt: the number of IOCBs to be posted to the IOCB ring.
 *
 * This routine posts a given number of IOCBs with the associated DMA buffer
 * descriptors specified by the cnt argument to the given IOCB ring.
 *
 * Return codes
 *   The number of IOCBs NOT able to be posted to the IOCB ring.
 **/
int
lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
        IOCB_t *icmd;
        struct lpfc_iocbq *iocb;
        struct lpfc_dmabuf *mp1, *mp2;

        cnt += pring->missbufcnt;

        /* While there are buffers to post */
        while (cnt > 0) {
                /* Allocate buffer for command iocb */
                iocb = lpfc_sli_get_iocbq(phba);
                if (iocb == NULL) {
                        pring->missbufcnt = cnt;
                        return cnt;
                }
                icmd = &iocb->iocb;

                /* 2 buffers can be posted per command */
                /* Allocate buffer to post */
                mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
                if (mp1)
                        mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
                if (!mp1 || !mp1->virt) {
                        kfree(mp1);
                        lpfc_sli_release_iocbq(phba, iocb);
                        pring->missbufcnt = cnt;
                        return cnt;
                }

                INIT_LIST_HEAD(&mp1->list);
                /* Allocate buffer to post */
                if (cnt > 1) {
                        mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
                        if (mp2)
                                mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
                                                            &mp2->phys);
                        if (!mp2 || !mp2->virt) {
                                kfree(mp2);
                                lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
                                kfree(mp1);
                                lpfc_sli_release_iocbq(phba, iocb);
                                pring->missbufcnt = cnt;
                                return cnt;
                        }

                        INIT_LIST_HEAD(&mp2->list);
                } else {
                        mp2 = NULL;
                }

                icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
                icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
                icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
                icmd->ulpBdeCount = 1;
                cnt--;
                if (mp2) {
                        icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
                        icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
                        icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
                        cnt--;
                        icmd->ulpBdeCount = 2;
                }

                icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
                icmd->ulpLe = 1;

                if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) {
                        lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
                        kfree(mp1);
                        cnt++;
                        if (mp2) {
                                lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
                                kfree(mp2);
                                cnt++;
                        }
                        lpfc_sli_release_iocbq(phba, iocb);
                        pring->missbufcnt = cnt;
                        return cnt;
                }
                lpfc_sli_ringpostbuf_put(phba, pring, mp1);
                if (mp2)
                        lpfc_sli_ringpostbuf_put(phba, pring, mp2);
        }
        pring->missbufcnt = 0;
        return 0;
}
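
/*
 * A note on missbufcnt (descriptive only): buffers that could not be
 * posted, e.g. on allocation or issue failure, are remembered in
 * pring->missbufcnt and added to cnt at the top of the next call, so a
 * later invocation automatically retries the shortfall:
 *
 *      lpfc_post_buffer(phba, pring, 0);   /+ reposts any missed buffers +/
 *
 * (the "/+ +/" above stands in for a nested comment).
 */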

/**
 * lpfc_post_rcv_buf: Post the initial receive IOCB buffers to the ELS ring.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts initial receive IOCB buffers to the ELS ring. The
 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
 * set to 64 IOCBs.
 *
 * Return codes
 *   0 - success (currently always success)
 **/
static int
lpfc_post_rcv_buf(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;

        /* Ring 0, ELS / CT buffers */
        lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
        /* Ring 2 - FCP no buffers needed */

        return 0;
}

#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))

/**
 * lpfc_sha_init: Set up initial array of hash table entries.
 * @HashResultPointer: pointer to an array as hash table.
 *
 * This routine sets up the initial values to the array of hash table entries
 * for the LC HBAs.
 **/
static void
lpfc_sha_init(uint32_t * HashResultPointer)
{
        HashResultPointer[0] = 0x67452301;
        HashResultPointer[1] = 0xEFCDAB89;
        HashResultPointer[2] = 0x98BADCFE;
        HashResultPointer[3] = 0x10325476;
        HashResultPointer[4] = 0xC3D2E1F0;
}
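
/*
 * A note on the hash helpers (descriptive only): S(N,V) above is a 32-bit
 * rotate-left of V by N bits, e.g. S(1, 0x80000000) == 0x00000001, and
 * the five constants loaded by lpfc_sha_init() are the standard SHA-1
 * initial state; lpfc_sha_iterate() below performs the corresponding
 * 80-round SHA-1-style compression over a working array.
 */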

/**
 * lpfc_sha_iterate: Iterate initial hash table with the working hash table.
 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to a working hash table.
 *
 * This routine iterates an initial hash table pointed by @HashResultPointer
 * with the values from the working hash table pointed by
 * @HashWorkingPointer. The results are put back into the initial hash
 * table, returned through the @HashResultPointer as the result hash table.
 **/
static void
lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
        int t;
        uint32_t TEMP;
        uint32_t A, B, C, D, E;
        t = 16;
        do {
                HashWorkingPointer[t] =
                    S(1,
                      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
                      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
        } while (++t <= 79);
        t = 0;
        A = HashResultPointer[0];
        B = HashResultPointer[1];
        C = HashResultPointer[2];
        D = HashResultPointer[3];
        E = HashResultPointer[4];

        do {
                if (t < 20) {
                        TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
                } else if (t < 40) {
                        TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
                } else if (t < 60) {
                        TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
                } else {
                        TEMP = (B ^ C ^ D) + 0xCA62C1D6;
                }
                TEMP += S(5, A) + E + HashWorkingPointer[t];
                E = D;
                D = C;
                C = S(30, B);
                B = A;
                A = TEMP;
        } while (++t <= 79);

        HashResultPointer[0] += A;
        HashResultPointer[1] += B;
        HashResultPointer[2] += C;
        HashResultPointer[3] += D;
        HashResultPointer[4] += E;

}

/**
 * lpfc_challenge_key: Create challenge key based on WWPN of the HBA.
 * @RandomChallenge: pointer to the entry of the host challenge random
 *   number array.
 * @HashWorking: pointer to the entry of the working hash array.
 *
 * This routine calculates the working hash array referred by @HashWorking
 * from the challenge random numbers associated with the host, referred by
 * @RandomChallenge. The result is put into the entry of the working hash
 * array and returned by reference through @HashWorking.
 **/
static void
lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
        *HashWorking = (*RandomChallenge ^ *HashWorking);
}
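
/*
 * Putting the pieces together (descriptive only): lpfc_hba_init() below
 * builds an 80-word working array seeded from the adapter WWNN, XORs its
 * first words with the RandomData challenge captured from READ_NVPARM in
 * lpfc_config_port_prep(), then digests the array with the SHA-1-style
 * routines above; the resulting 5-word hash is what is handed back to the
 * LC HBA through @hbainit.
 */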

/**
 * lpfc_hba_init: Perform special handling for LC HBA initialization.
 * @phba: pointer to lpfc hba data structure.
 * @hbainit: pointer to an array of unsigned 32-bit integers.
 *
 * This routine performs the special handling for LC HBA initialization.
 **/
void
lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
{
        int t;
        uint32_t *HashWorking;
        uint32_t *pwwnn = (uint32_t *) phba->wwnn;

        HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
        if (!HashWorking)
                return;

        HashWorking[0] = HashWorking[78] = *pwwnn++;
        HashWorking[1] = HashWorking[79] = *pwwnn;

        for (t = 0; t < 7; t++)
                lpfc_challenge_key(phba->RandomData + t, HashWorking + t);

        lpfc_sha_init(hbainit);
        lpfc_sha_iterate(hbainit, HashWorking);
        kfree(HashWorking);
}

/**
 * lpfc_cleanup: Performs vport cleanups before deleting a vport.
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine performs the necessary cleanups before deleting the @vport.
 * It invokes the discovery state machine to perform necessary state
 * transitions and to release the ndlps associated with the @vport. Note,
 * the physical port is treated as @vport 0.
 **/
void
lpfc_cleanup(struct lpfc_vport *vport)
{
        struct lpfc_hba   *phba = vport->phba;
        struct lpfc_nodelist *ndlp, *next_ndlp;
        int i = 0;

        if (phba->link_state > LPFC_LINK_DOWN)
                lpfc_port_link_failure(vport);

        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
                if (!NLP_CHK_NODE_ACT(ndlp)) {
                        ndlp = lpfc_enable_node(vport, ndlp,
                                                NLP_STE_UNUSED_NODE);
                        if (!ndlp)
                                continue;
                        spin_lock_irq(&phba->ndlp_lock);
                        NLP_SET_FREE_REQ(ndlp);
                        spin_unlock_irq(&phba->ndlp_lock);
                        /* Trigger the release of the ndlp memory */
                        lpfc_nlp_put(ndlp);
                        continue;
                }
                spin_lock_irq(&phba->ndlp_lock);
                if (NLP_CHK_FREE_REQ(ndlp)) {
                        /* The ndlp should not be in memory free mode already */
                        spin_unlock_irq(&phba->ndlp_lock);
                        continue;
                } else
                        /* Indicate request for freeing ndlp memory */
                        NLP_SET_FREE_REQ(ndlp);
                spin_unlock_irq(&phba->ndlp_lock);

                if (vport->port_type != LPFC_PHYSICAL_PORT &&
                    ndlp->nlp_DID == Fabric_DID) {
                        /* Just free up ndlp with Fabric_DID for vports */
                        lpfc_nlp_put(ndlp);
                        continue;
                }

                if (ndlp->nlp_type & NLP_FABRIC)
                        lpfc_disc_state_machine(vport, ndlp, NULL,
                                        NLP_EVT_DEVICE_RECOVERY);

                lpfc_disc_state_machine(vport, ndlp, NULL,
                                             NLP_EVT_DEVICE_RM);

        }

        /* At this point, ALL ndlp's should be gone
         * because of the previous NLP_EVT_DEVICE_RM.
         * Let's wait for this to happen, if needed.
         */
        while (!list_empty(&vport->fc_nodes)) {

                if (i++ > 3000) {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
                                "0233 Nodelist not empty\n");
                        list_for_each_entry_safe(ndlp, next_ndlp,
                                                &vport->fc_nodes, nlp_listp) {
                                lpfc_printf_vlog(ndlp->vport, KERN_ERR,
                                                LOG_NODE,
                                                "0282 did:x%x ndlp:x%p "
                                                "usgmap:x%x refcnt:%d\n",
                                                ndlp->nlp_DID, (void *)ndlp,
                                                ndlp->nlp_usg_map,
                                                atomic_read(
                                                        &ndlp->kref.refcount));
                        }
                        break;
                }

                /* Wait for any activity on ndlps to settle */
                msleep(10);
        }
        return;
}
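
/*
 * A note on the wait loop above (descriptive only): with msleep(10) per
 * pass and a bail-out once i exceeds 3000 iterations, lpfc_cleanup()
 * waits at most roughly 3000 * 10 ms = 30 seconds for the node list to
 * drain before logging the leftover ndlps and giving up.
 */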

/**
 * lpfc_stop_vport_timers: Stop all the timers associated with a vport.
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine stops all the timers associated with a @vport. This function
 * is invoked before disabling or deleting a @vport. Note that the physical
 * port is treated as @vport 0.
 **/
void
lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
        del_timer_sync(&vport->els_tmofunc);
        del_timer_sync(&vport->fc_fdmitmo);
        lpfc_can_disctmo(vport);
        return;
}

/**
 * lpfc_stop_phba_timers: Stop all the timers associated with an HBA.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops all the timers associated with a HBA. This function is
 * invoked before either putting a HBA offline or unloading the driver.
 **/
static void
lpfc_stop_phba_timers(struct lpfc_hba *phba)
{
        del_timer_sync(&phba->fcp_poll_timer);
        lpfc_stop_vport_timers(phba->pport);
        del_timer_sync(&phba->sli.mbox_tmo);
        del_timer_sync(&phba->fabric_block_timer);
        phba->hb_outstanding = 0;
        del_timer_sync(&phba->hb_tmofunc);
        del_timer_sync(&phba->eratt_poll);
        return;
}

/**
 * lpfc_block_mgmt_io: Mark a HBA's management interface as blocked.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine marks a HBA's management interface as blocked. Once the HBA's
 * management interface is marked as blocked, all the user space access to
 * the HBA, whether from the sysfs interface or the libdfc interface, will
 * be blocked. The HBA is set to block the management interface when the
 * driver prepares the HBA interface for online or offline.
 **/
static void
lpfc_block_mgmt_io(struct lpfc_hba * phba)
{
        unsigned long iflag;

        spin_lock_irqsave(&phba->hbalock, iflag);
        phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
        spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_online: Initialize and bring a HBA online.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the HBA and brings a HBA online. During this
 * process, the management interface is blocked to prevent user space access
 * to the HBA interfering with the driver initialization.
 *
 * Return codes
 *   0 - successful
 *   1 - failed
 **/
int
lpfc_online(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport;
        struct lpfc_vport **vports;
        int i;

        if (!phba)
                return 0;
        vport = phba->pport;

        if (!(vport->fc_flag & FC_OFFLINE_MODE))
                return 0;

        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
                        "0458 Bring Adapter online\n");

        lpfc_block_mgmt_io(phba);

        if (!lpfc_sli_queue_setup(phba)) {
                lpfc_unblock_mgmt_io(phba);
                return 1;
        }

        if (lpfc_sli_hba_setup(phba)) { /* Initialize the HBA */
                lpfc_unblock_mgmt_io(phba);
                return 1;
        }

        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
                        struct Scsi_Host *shost;
                        shost = lpfc_shost_from_vport(vports[i]);
                        spin_lock_irq(shost->host_lock);
                        vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
                        if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
                                vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
                        spin_unlock_irq(shost->host_lock);
                }
        lpfc_destroy_vport_work_array(phba, vports);

        lpfc_unblock_mgmt_io(phba);
        return 0;
}
Once the 1846 * HBA's management interface is marked as not blocked, all user space 1847 * access to the HBA, whether from the sysfs or the libdfc 1848 * interface, will be allowed. The HBA is set to block the management interface 1849 * when the driver prepares the HBA interface for online or offline, and is then 1850 * set to unblock the management interface afterwards. 1851 **/ 1852 void 1853 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 1854 { 1855 unsigned long iflag; 1856 1857 spin_lock_irqsave(&phba->hbalock, iflag); 1858 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 1859 spin_unlock_irqrestore(&phba->hbalock, iflag); 1860 } 1861 1862 /** 1863 * lpfc_offline_prep: Prepare an HBA to be brought offline. 1864 * @phba: pointer to lpfc hba data structure. 1865 * 1866 * This routine is invoked to prepare an HBA to be brought offline. It issues 1867 * an unreg_login to all the nodes on all vports and flushes the mailbox 1868 * queue to make it ready to be brought offline. 1869 **/ 1870 void 1871 lpfc_offline_prep(struct lpfc_hba * phba) 1872 { 1873 struct lpfc_vport *vport = phba->pport; 1874 struct lpfc_nodelist *ndlp, *next_ndlp; 1875 struct lpfc_vport **vports; 1876 int i; 1877 1878 if (vport->fc_flag & FC_OFFLINE_MODE) 1879 return; 1880 1881 lpfc_block_mgmt_io(phba); 1882 1883 lpfc_linkdown(phba); 1884 1885 /* Issue an unreg_login to all nodes on all vports */ 1886 vports = lpfc_create_vport_work_array(phba); 1887 if (vports != NULL) { 1888 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 1889 struct Scsi_Host *shost; 1890 1891 if (vports[i]->load_flag & FC_UNLOADING) 1892 continue; 1893 shost = lpfc_shost_from_vport(vports[i]); 1894 list_for_each_entry_safe(ndlp, next_ndlp, 1895 &vports[i]->fc_nodes, 1896 nlp_listp) { 1897 if (!NLP_CHK_NODE_ACT(ndlp)) 1898 continue; 1899 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 1900 continue; 1901 if (ndlp->nlp_type & NLP_FABRIC) { 1902 lpfc_disc_state_machine(vports[i], ndlp, 1903 NULL, NLP_EVT_DEVICE_RECOVERY); 1904 lpfc_disc_state_machine(vports[i], ndlp, 1905 NULL, NLP_EVT_DEVICE_RM); 1906 } 1907 spin_lock_irq(shost->host_lock); 1908 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 1909 spin_unlock_irq(shost->host_lock); 1910 lpfc_unreg_rpi(vports[i], ndlp); 1911 } 1912 } 1913 } 1914 lpfc_destroy_vport_work_array(phba, vports); 1915 1916 lpfc_sli_flush_mbox_queue(phba); 1917 } 1918 1919 /** 1920 * lpfc_offline: Bring an HBA offline. 1921 * @phba: pointer to lpfc hba data structure. 1922 * 1923 * This routine actually brings an HBA offline. It stops all the timers 1924 * associated with the HBA, brings down the SLI layer, and eventually 1925 * marks the HBA as offline for the upper layer protocol. 1926 **/ 1927 void 1928 lpfc_offline(struct lpfc_hba *phba) 1929 { 1930 struct Scsi_Host *shost; 1931 struct lpfc_vport **vports; 1932 int i; 1933 1934 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 1935 return; 1936 1937 /* stop all timers associated with this hba */ 1938 lpfc_stop_phba_timers(phba); 1939 vports = lpfc_create_vport_work_array(phba); 1940 if (vports != NULL) 1941 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) 1942 lpfc_stop_vport_timers(vports[i]); 1943 lpfc_destroy_vport_work_array(phba, vports); 1944 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1945 "0460 Bring Adapter offline\n"); 1946 /* Bring down the SLI Layer and clean up. The HBA is offline 1947 now.
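As in the remove path later in this file, lpfc_sli_hba_down() clears the rings and discards any outstanding mailbox commands.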
*/ 1948 lpfc_sli_hba_down(phba); 1949 spin_lock_irq(&phba->hbalock); 1950 phba->work_ha = 0; 1951 spin_unlock_irq(&phba->hbalock); 1952 vports = lpfc_create_vport_work_array(phba); 1953 if (vports != NULL) 1954 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 1955 shost = lpfc_shost_from_vport(vports[i]); 1956 spin_lock_irq(shost->host_lock); 1957 vports[i]->work_port_events = 0; 1958 vports[i]->fc_flag |= FC_OFFLINE_MODE; 1959 spin_unlock_irq(shost->host_lock); 1960 } 1961 lpfc_destroy_vport_work_array(phba, vports); 1962 } 1963 1964 /** 1965 * lpfc_scsi_free: Free all the SCSI buffers and IOCBs from driver lists. 1966 * @phba: pointer to lpfc hba data structure. 1967 * 1968 * This routine is to free all the SCSI buffers and IOCBs from the driver 1969 * lists back to the kernel. It is called from lpfc_pci_remove_one to free 1970 * the internal resources before the device is removed from the system. 1971 * 1972 * Return codes 1973 * 0 - successful (for now, it always returns 0) 1974 **/ 1975 static int 1976 lpfc_scsi_free(struct lpfc_hba *phba) 1977 { 1978 struct lpfc_scsi_buf *sb, *sb_next; 1979 struct lpfc_iocbq *io, *io_next; 1980 1981 spin_lock_irq(&phba->hbalock); 1982 /* Release all the lpfc_scsi_bufs maintained by this host. */ 1983 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 1984 list_del(&sb->list); 1985 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 1986 sb->dma_handle); 1987 kfree(sb); 1988 phba->total_scsi_bufs--; 1989 } 1990 1991 /* Release all the lpfc_iocbq entries maintained by this host. */ 1992 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 1993 list_del(&io->list); 1994 kfree(io); 1995 phba->total_iocbq_bufs--; 1996 } 1997 1998 spin_unlock_irq(&phba->hbalock); 1999 2000 return 0; 2001 } 2002 2003 /** 2004 * lpfc_create_port: Create an FC port. 2005 * @phba: pointer to lpfc hba data structure. 2006 * @instance: a unique integer ID for this FC port. 2007 * @dev: pointer to the device data structure. 2008 * 2009 * This routine creates an FC port for the upper layer protocol. The FC port 2010 * can be created on top of either a physical port or a virtual port provided 2011 * by the HBA. This routine also allocates a SCSI host data structure (shost) 2012 * and associates it with the FC port before adding the shost to the SCSI 2013 * layer. 2014 * 2015 * Return codes 2016 * @vport - pointer to the virtual N_Port data structure. 2017 * NULL - port create failed. 2018 **/ 2019 struct lpfc_vport * 2020 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 2021 { 2022 struct lpfc_vport *vport; 2023 struct Scsi_Host *shost; 2024 int error = 0; 2025 2026 if (dev != &phba->pcidev->dev) 2027 shost = scsi_host_alloc(&lpfc_vport_template, 2028 sizeof(struct lpfc_vport)); 2029 else 2030 shost = scsi_host_alloc(&lpfc_template, 2031 sizeof(struct lpfc_vport)); 2032 if (!shost) 2033 goto out; 2034 2035 vport = (struct lpfc_vport *) shost->hostdata; 2036 vport->phba = phba; 2037 vport->load_flag |= FC_LOADING; 2038 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2039 vport->fc_rscn_flush = 0; 2040 2041 lpfc_get_vport_cfgparam(vport); 2042 shost->unique_id = instance; 2043 shost->max_id = LPFC_MAX_TARGET; 2044 shost->max_lun = vport->cfg_max_luns; 2045 shost->this_id = -1; 2046 shost->max_cmd_len = 16; 2047 2048 /* 2049 * Set initial can_queue value since 0 is no longer supported and 2050 * scsi_add_host will fail. This will be adjusted later based on the 2051 * max xri value determined in hba setup.
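* (The margin of 10 subtracted below is presumably headroom kept for the driver's internally issued commands.)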
2052 */ 2053 shost->can_queue = phba->cfg_hba_queue_depth - 10; 2054 if (dev != &phba->pcidev->dev) { 2055 shost->transportt = lpfc_vport_transport_template; 2056 vport->port_type = LPFC_NPIV_PORT; 2057 } else { 2058 shost->transportt = lpfc_transport_template; 2059 vport->port_type = LPFC_PHYSICAL_PORT; 2060 } 2061 2062 /* Initialize all internally managed lists. */ 2063 INIT_LIST_HEAD(&vport->fc_nodes); 2064 spin_lock_init(&vport->work_port_lock); 2065 2066 init_timer(&vport->fc_disctmo); 2067 vport->fc_disctmo.function = lpfc_disc_timeout; 2068 vport->fc_disctmo.data = (unsigned long)vport; 2069 2070 init_timer(&vport->fc_fdmitmo); 2071 vport->fc_fdmitmo.function = lpfc_fdmi_tmo; 2072 vport->fc_fdmitmo.data = (unsigned long)vport; 2073 2074 init_timer(&vport->els_tmofunc); 2075 vport->els_tmofunc.function = lpfc_els_timeout; 2076 vport->els_tmofunc.data = (unsigned long)vport; 2077 2078 error = scsi_add_host(shost, dev); 2079 if (error) 2080 goto out_put_shost; 2081 2082 spin_lock_irq(&phba->hbalock); 2083 list_add_tail(&vport->listentry, &phba->port_list); 2084 spin_unlock_irq(&phba->hbalock); 2085 return vport; 2086 2087 out_put_shost: 2088 scsi_host_put(shost); 2089 out: 2090 return NULL; 2091 } 2092 2093 /** 2094 * destroy_port: Destroy an FC port. 2095 * @vport: pointer to an lpfc virtual N_Port data structure. 2096 * 2097 * This routine destroys an FC port created for the upper layer protocol. All 2098 * the resources associated with the port are released. 2099 **/ 2100 void 2101 destroy_port(struct lpfc_vport *vport) 2102 { 2103 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2104 struct lpfc_hba *phba = vport->phba; 2105 2106 lpfc_debugfs_terminate(vport); 2107 fc_remove_host(shost); 2108 scsi_remove_host(shost); 2109 2110 spin_lock_irq(&phba->hbalock); 2111 list_del_init(&vport->listentry); 2112 spin_unlock_irq(&phba->hbalock); 2113 2114 lpfc_cleanup(vport); 2115 return; 2116 } 2117 2118 /** 2119 * lpfc_get_instance: Get a unique integer ID. 2120 * 2121 * This routine allocates a unique integer ID from the lpfc_hba_index pool. It 2122 * uses the kernel idr facility to perform the task. 2123 * 2124 * Return codes: 2125 * instance - a unique integer ID allocated as the new instance. 2126 * -1 - lpfc get instance failed. 2127 **/ 2128 int 2129 lpfc_get_instance(void) 2130 { 2131 int instance = 0; 2132 2133 /* Assign an unused number */ 2134 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL)) 2135 return -1; 2136 if (idr_get_new(&lpfc_hba_index, NULL, &instance)) 2137 return -1; 2138 return instance; 2139 } 2140 2141 /** 2142 * lpfc_scan_finished: method for SCSI layer to detect whether scan is done. 2143 * @shost: pointer to SCSI host data structure. 2144 * @time: elapsed time of the scan in jiffies. 2145 * 2146 * This routine is called by the SCSI layer with a SCSI host to determine 2147 * whether the host scan is finished. 2148 * 2149 * Note: there is no scan_start function as adapter initialization will have 2150 * asynchronously kicked off the link initialization. 2151 * 2152 * Return codes 2153 * 0 - SCSI host scan is not over yet. 2154 * 1 - SCSI host scan is over.
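* In short, the scan is reported as finished when the port is unloading, when more than 30 seconds have elapsed (or 15 seconds with the link still down), or when the vport is ready with no discovery, PRLI or mailbox activity still outstanding.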
2155 **/ 2156 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 2157 { 2158 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2159 struct lpfc_hba *phba = vport->phba; 2160 int stat = 0; 2161 2162 spin_lock_irq(shost->host_lock); 2163 2164 if (vport->load_flag & FC_UNLOADING) { 2165 stat = 1; 2166 goto finished; 2167 } 2168 if (time >= 30 * HZ) { 2169 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2170 "0461 Scanning longer than 30 " 2171 "seconds. Continuing initialization\n"); 2172 stat = 1; 2173 goto finished; 2174 } 2175 if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) { 2176 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2177 "0465 Link down longer than 15 " 2178 "seconds. Continuing initialization\n"); 2179 stat = 1; 2180 goto finished; 2181 } 2182 2183 if (vport->port_state != LPFC_VPORT_READY) 2184 goto finished; 2185 if (vport->num_disc_nodes || vport->fc_prli_sent) 2186 goto finished; 2187 if (vport->fc_map_cnt == 0 && time < 2 * HZ) 2188 goto finished; 2189 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 2190 goto finished; 2191 2192 stat = 1; 2193 2194 finished: 2195 spin_unlock_irq(shost->host_lock); 2196 return stat; 2197 } 2198 2199 /** 2200 * lpfc_host_attrib_init: Initialize SCSI host attributes on an FC port. 2201 * @shost: pointer to SCSI host data structure. 2202 * 2203 * This routine initializes the given SCSI host's attributes on an FC port. 2204 * The SCSI host can be on top of either a physical port or a virtual port. 2205 **/ 2206 void lpfc_host_attrib_init(struct Scsi_Host *shost) 2207 { 2208 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 2209 struct lpfc_hba *phba = vport->phba; 2210 /* 2211 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup(). 2212 */ 2213 2214 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 2215 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 2216 fc_host_supported_classes(shost) = FC_COS_CLASS3; 2217 2218 memset(fc_host_supported_fc4s(shost), 0, 2219 sizeof(fc_host_supported_fc4s(shost))); 2220 fc_host_supported_fc4s(shost)[2] = 1; 2221 fc_host_supported_fc4s(shost)[7] = 1; 2222 2223 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 2224 sizeof fc_host_symbolic_name(shost)); 2225 2226 fc_host_supported_speeds(shost) = 0; 2227 if (phba->lmt & LMT_10Gb) 2228 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 2229 if (phba->lmt & LMT_8Gb) 2230 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 2231 if (phba->lmt & LMT_4Gb) 2232 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 2233 if (phba->lmt & LMT_2Gb) 2234 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 2235 if (phba->lmt & LMT_1Gb) 2236 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 2237 2238 fc_host_maxframe_size(shost) = 2239 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 2240 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 2241 2242 /* This value is also unchanging */ 2243 memset(fc_host_active_fc4s(shost), 0, 2244 sizeof(fc_host_active_fc4s(shost))); 2245 fc_host_active_fc4s(shost)[2] = 1; 2246 fc_host_active_fc4s(shost)[7] = 1; 2247 2248 fc_host_max_npiv_vports(shost) = phba->max_vpi; 2249 spin_lock_irq(shost->host_lock); 2250 vport->load_flag &= ~FC_LOADING; 2251 spin_unlock_irq(shost->host_lock); 2252 } 2253 2254 /** 2255 * lpfc_enable_msix: Enable MSI-X interrupt mode. 2256 * @phba: pointer to lpfc hba data structure. 2257 * 2258 * This routine is invoked to enable the MSI-X interrupt vectors.
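* (Two vectors are set up below: vector 0 for the slow-path interrupt handler and vector 1 for the fast-path interrupt handler.)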
The kernel 2259 * function pci_enable_msix() is called to enable the MSI-X vectors. Note that 2260 * pci_enable_msix(), once invoked, enables either all or nothing, depending 2261 * on the current availability of PCI vector resources. The device driver is 2262 * responsible for calling the individual request_irq() to register each MSI-X 2263 * vector with an interrupt handler, which is done in this function. Note that 2264 * later, when the device is unloading, the driver should always call free_irq() 2265 * on all MSI-X vectors it has done request_irq() on before calling 2266 * pci_disable_msix(). Failure to do so results in a BUG_ON() and the device 2267 * will be left with MSI-X enabled and its vectors leaked. 2268 * 2269 * Return codes 2270 * 0 - successful 2271 * other values - error 2272 **/ 2273 static int 2274 lpfc_enable_msix(struct lpfc_hba *phba) 2275 { 2276 int rc, i; 2277 LPFC_MBOXQ_t *pmb; 2278 2279 /* Set up MSI-X multi-message vectors */ 2280 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 2281 phba->msix_entries[i].entry = i; 2282 2283 /* Configure MSI-X capability structure */ 2284 rc = pci_enable_msix(phba->pcidev, phba->msix_entries, 2285 ARRAY_SIZE(phba->msix_entries)); 2286 if (rc) { 2287 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2288 "0420 PCI enable MSI-X failed (%d)\n", rc); 2289 goto msi_fail_out; 2290 } else 2291 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 2292 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2293 "0477 MSI-X entry[%d]: vector=x%x " 2294 "message=%d\n", i, 2295 phba->msix_entries[i].vector, 2296 phba->msix_entries[i].entry); 2297 /* 2298 * Assign MSI-X vectors to interrupt handlers 2299 */ 2300 2301 /* vector-0 is associated to slow-path handler */ 2302 rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler, 2303 IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba); 2304 if (rc) { 2305 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2306 "0421 MSI-X slow-path request_irq failed " 2307 "(%d)\n", rc); 2308 goto msi_fail_out; 2309 } 2310 2311 /* vector-1 is associated to fast-path handler */ 2312 rc = request_irq(phba->msix_entries[1].vector, &lpfc_fp_intr_handler, 2313 IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba); 2314 2315 if (rc) { 2316 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2317 "0429 MSI-X fast-path request_irq failed " 2318 "(%d)\n", rc); 2319 goto irq_fail_out; 2320 } 2321 2322 /* 2323 * Configure HBA MSI-X attention conditions to messages 2324 */ 2325 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2326 2327 if (!pmb) { 2328 rc = -ENOMEM; 2329 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2330 "0474 Unable to allocate memory for issuing " 2331 "MBOX_CONFIG_MSI command\n"); 2332 goto mem_fail_out; 2333 } 2334 rc = lpfc_config_msi(phba, pmb); 2335 if (rc) 2336 goto mbx_fail_out; 2337 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 2338 if (rc != MBX_SUCCESS) { 2339 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 2340 "0351 Config MSI mailbox command failed, " 2341 "mbxCmd x%x, mbxStatus x%x\n", 2342 pmb->mb.mbxCommand, pmb->mb.mbxStatus); 2343 goto mbx_fail_out; 2344 } 2345 2346 /* Free memory allocated for mailbox command */ 2347 mempool_free(pmb, phba->mbox_mem_pool); 2348 return rc; 2349 2350 mbx_fail_out: 2351 /* Free memory allocated for mailbox command */ 2352 mempool_free(pmb, phba->mbox_mem_pool); 2353 2354 mem_fail_out: 2355 /* free the irq already requested */ 2356 free_irq(phba->msix_entries[1].vector, phba); 2357 2358 irq_fail_out: 2359 /* free the irq already requested */ 2360 free_irq(phba->msix_entries[0].vector, phba);
2361 2362 msi_fail_out: 2363 /* Unconfigure MSI-X capability structure */ 2364 pci_disable_msix(phba->pcidev); 2365 return rc; 2366 } 2367 2368 /** 2369 * lpfc_disable_msix: Disable MSI-X interrupt mode. 2370 * @phba: pointer to lpfc hba data structure. 2371 * 2372 * This routine is invoked to release the MSI-X vectors and then disable the 2373 * MSI-X interrupt mode. 2374 **/ 2375 static void 2376 lpfc_disable_msix(struct lpfc_hba *phba) 2377 { 2378 int i; 2379 2380 /* Free up MSI-X multi-message vectors */ 2381 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 2382 free_irq(phba->msix_entries[i].vector, phba); 2383 /* Disable MSI-X */ 2384 pci_disable_msix(phba->pcidev); 2385 } 2386 2387 /** 2388 * lpfc_enable_msi: Enable MSI interrupt mode. 2389 * @phba: pointer to lpfc hba data structure. 2390 * 2391 * This routine is invoked to enable the MSI interrupt mode. The kernel 2392 * function pci_enable_msi() is called to enable the MSI vector. The 2393 * device driver is responsible for calling request_irq() to register 2394 * the MSI vector with an interrupt handler, which is done in this function. 2395 * 2396 * Return codes 2397 * 0 - successful 2398 * other values - error 2399 */ 2400 static int 2401 lpfc_enable_msi(struct lpfc_hba *phba) 2402 { 2403 int rc; 2404 2405 rc = pci_enable_msi(phba->pcidev); 2406 if (!rc) 2407 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2408 "0462 PCI enable MSI mode success.\n"); 2409 else { 2410 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2411 "0471 PCI enable MSI mode failed (%d)\n", rc); 2412 return rc; 2413 } 2414 2415 rc = request_irq(phba->pcidev->irq, lpfc_intr_handler, 2416 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 2417 if (rc) { 2418 pci_disable_msi(phba->pcidev); 2419 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2420 "0478 MSI request_irq failed (%d)\n", rc); 2421 } 2422 return rc; 2423 } 2424 2425 /** 2426 * lpfc_disable_msi: Disable MSI interrupt mode. 2427 * @phba: pointer to lpfc hba data structure. 2428 * 2429 * This routine is invoked to disable the MSI interrupt mode. The driver 2430 * calls free_irq() on the MSI vector it has done request_irq() on before 2431 * calling pci_disable_msi(). Failure to do so results in a BUG_ON() and 2432 * the device will be left with MSI enabled and its vector leaked. 2433 */ 2434 2435 static void 2436 lpfc_disable_msi(struct lpfc_hba *phba) 2437 { 2438 free_irq(phba->pcidev->irq, phba); 2439 pci_disable_msi(phba->pcidev); 2440 return; 2441 } 2442 2443 /** 2444 * lpfc_log_intr_mode: Log the active interrupt mode 2445 * @phba: pointer to lpfc hba data structure. 2446 * @intr_mode: active interrupt mode adopted. 2447 * 2448 * This routine is invoked to log the interrupt mode currently used by 2449 * the device.
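* The @intr_mode values map to: 0 - INTx, 1 - MSI, 2 - MSI-X; any other value is logged as an illegal interrupt mode.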
2450 */ 2451 static void 2452 lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 2453 { 2454 switch (intr_mode) { 2455 case 0: 2456 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2457 "0470 Enable INTx interrupt mode.\n"); 2458 break; 2459 case 1: 2460 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2461 "0481 Enabled MSI interrupt mode.\n"); 2462 break; 2463 case 2: 2464 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2465 "0480 Enabled MSI-X interrupt mode.\n"); 2466 break; 2467 default: 2468 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2469 "0482 Illegal interrupt mode.\n"); 2470 break; 2471 } 2472 return; 2473 } 2474 2475 static void 2476 lpfc_stop_port(struct lpfc_hba *phba) 2477 { 2478 /* Clear all interrupt enable conditions */ 2479 writel(0, phba->HCregaddr); 2480 readl(phba->HCregaddr); /* flush */ 2481 /* Clear all pending interrupts */ 2482 writel(0xffffffff, phba->HAregaddr); 2483 readl(phba->HAregaddr); /* flush */ 2484 2485 /* Reset some HBA SLI setup states */ 2486 lpfc_stop_phba_timers(phba); 2487 phba->pport->work_port_events = 0; 2488 2489 return; 2490 } 2491 2492 /** 2493 * lpfc_enable_intr: Enable device interrupt. 2494 * @phba: pointer to lpfc hba data structure. * @cfg_mode: interrupt mode requested (2 - MSI-X, 1 - MSI, 0 - INTx). 2495 * 2496 * This routine is invoked to enable the device interrupt and associate the 2497 * driver's interrupt handler(s) with interrupt vector(s). Depending on the 2498 * interrupt mode configured for the driver, the driver will try to fall back 2499 * from the configured interrupt mode to an interrupt mode which is supported 2500 * by the platform, kernel, and device, in the order: MSI-X -> MSI -> IRQ. 2501 * 2502 * Return codes 2503 * 0 - successful 2504 * other values - error 2505 **/ 2506 static uint32_t 2507 lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 2508 { 2509 uint32_t intr_mode = LPFC_INTR_ERROR; 2510 int retval; 2511 2512 if (cfg_mode == 2) { 2513 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 2514 retval = lpfc_sli_config_port(phba, 3); 2515 if (!retval) { 2516 /* Now, try to enable MSI-X interrupt mode */ 2517 retval = lpfc_enable_msix(phba); 2518 if (!retval) { 2519 /* Indicate initialization to MSI-X mode */ 2520 phba->intr_type = MSIX; 2521 intr_mode = 2; 2522 } 2523 } 2524 } 2525 2526 /* Fall back to MSI if MSI-X initialization failed */ 2527 if (cfg_mode >= 1 && phba->intr_type == NONE) { 2528 retval = lpfc_enable_msi(phba); 2529 if (!retval) { 2530 /* Indicate initialization to MSI mode */ 2531 phba->intr_type = MSI; 2532 intr_mode = 1; 2533 } 2534 } 2535 2536 /* Fall back to INTx if both MSI-X/MSI initialization failed */ 2537 if (phba->intr_type == NONE) { 2538 retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, 2539 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 2540 if (!retval) { 2541 /* Indicate initialization to INTx mode */ 2542 phba->intr_type = INTx; 2543 intr_mode = 0; 2544 } 2545 } 2546 return intr_mode; 2547 } 2548 2549 /** 2550 * lpfc_disable_intr: Disable device interrupt. 2551 * @phba: pointer to lpfc hba data structure. 2552 * 2553 * This routine is invoked to disable device interrupt and disassociate the 2554 * driver's interrupt handler(s) from interrupt vector(s). Depending on the 2555 * interrupt mode, the driver will release the interrupt vector(s) for the 2556 * message signaled interrupt.
2557 **/ 2558 static void 2559 lpfc_disable_intr(struct lpfc_hba *phba) 2560 { 2561 /* Disable the currently initialized interrupt mode */ 2562 if (phba->intr_type == MSIX) 2563 lpfc_disable_msix(phba); 2564 else if (phba->intr_type == MSI) 2565 lpfc_disable_msi(phba); 2566 else if (phba->intr_type == INTx) 2567 free_irq(phba->pcidev->irq, phba); 2568 2569 /* Reset interrupt management states */ 2570 phba->intr_type = NONE; 2571 phba->sli.slistat.sli_intr = 0; 2572 2573 return; 2574 } 2575 2576 /** 2577 * lpfc_pci_probe_one: lpfc PCI probe func to register device to PCI subsystem. 2578 * @pdev: pointer to PCI device 2579 * @pid: pointer to PCI device identifier 2580 * 2581 * This routine is to be registered to the kernel's PCI subsystem. When an 2582 * Emulex HBA is present on the PCI bus, the kernel PCI subsystem looks at 2583 * the PCI device-specific information of the device and driver to see whether 2584 * the driver can support this kind of device. If the match is 2585 * successful, the driver core invokes this routine. If this routine 2586 * determines it can claim the HBA, it does all the initialization that it 2587 * needs to do to handle the HBA properly. 2588 * 2589 * Return code 2590 * 0 - driver can claim the device 2591 * negative value - driver cannot claim the device 2592 **/ 2593 static int __devinit 2594 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 2595 { 2596 struct lpfc_vport *vport = NULL; 2597 struct lpfc_hba *phba; 2598 struct lpfc_sli *psli; 2599 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 2600 struct Scsi_Host *shost = NULL; 2601 void *ptr; 2602 unsigned long bar0map_len, bar2map_len; 2603 int error = -ENODEV, retval; 2604 int i, hbq_count; 2605 uint16_t iotag; 2606 uint32_t cfg_mode, intr_mode; 2607 int bars = pci_select_bars(pdev, IORESOURCE_MEM); 2608 struct lpfc_adapter_event_header adapter_event; 2609 2610 if (pci_enable_device_mem(pdev)) 2611 goto out; 2612 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME)) 2613 goto out_disable_device; 2614 2615 phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL); 2616 if (!phba) 2617 goto out_release_regions; 2618 2619 atomic_set(&phba->fast_event_count, 0); 2620 spin_lock_init(&phba->hbalock); 2621 2622 /* Initialize ndlp management spinlock */ 2623 spin_lock_init(&phba->ndlp_lock); 2624 2625 phba->pcidev = pdev; 2626 2627 /* Assign an unused board number */ 2628 if ((phba->brd_no = lpfc_get_instance()) < 0) 2629 goto out_free_phba; 2630 2631 INIT_LIST_HEAD(&phba->port_list); 2632 init_waitqueue_head(&phba->wait_4_mlo_m_q); 2633 /* 2634 * Get all the module params for configuring this host and then 2635 * establish the host.
2636 */ 2637 lpfc_get_cfgparam(phba); 2638 phba->max_vpi = LPFC_MAX_VPI; 2639 2640 /* Initialize timers used by driver */ 2641 init_timer(&phba->hb_tmofunc); 2642 phba->hb_tmofunc.function = lpfc_hb_timeout; 2643 phba->hb_tmofunc.data = (unsigned long)phba; 2644 2645 psli = &phba->sli; 2646 init_timer(&psli->mbox_tmo); 2647 psli->mbox_tmo.function = lpfc_mbox_timeout; 2648 psli->mbox_tmo.data = (unsigned long) phba; 2649 init_timer(&phba->fcp_poll_timer); 2650 phba->fcp_poll_timer.function = lpfc_poll_timeout; 2651 phba->fcp_poll_timer.data = (unsigned long) phba; 2652 init_timer(&phba->fabric_block_timer); 2653 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 2654 phba->fabric_block_timer.data = (unsigned long) phba; 2655 init_timer(&phba->eratt_poll); 2656 phba->eratt_poll.function = lpfc_poll_eratt; 2657 phba->eratt_poll.data = (unsigned long) phba; 2658 2659 pci_set_master(pdev); 2660 pci_save_state(pdev); 2661 pci_try_set_mwi(pdev); 2662 2663 if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0) 2664 if (pci_set_dma_mask(phba->pcidev, DMA_32BIT_MASK) != 0) 2665 goto out_idr_remove; 2666 2667 /* 2668 * Get the bus address of Bar0 and Bar2 and the number of bytes 2669 * required by each mapping. 2670 */ 2671 phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0); 2672 bar0map_len = pci_resource_len(phba->pcidev, 0); 2673 2674 phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2); 2675 bar2map_len = pci_resource_len(phba->pcidev, 2); 2676 2677 /* Map HBA SLIM to a kernel virtual address. */ 2678 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 2679 if (!phba->slim_memmap_p) { 2680 error = -ENODEV; 2681 dev_printk(KERN_ERR, &pdev->dev, 2682 "ioremap failed for SLIM memory.\n"); 2683 goto out_idr_remove; 2684 } 2685 2686 /* Map HBA Control Registers to a kernel virtual address. */ 2687 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 2688 if (!phba->ctrl_regs_memmap_p) { 2689 error = -ENODEV; 2690 dev_printk(KERN_ERR, &pdev->dev, 2691 "ioremap failed for HBA control registers.\n"); 2692 goto out_iounmap_slim; 2693 } 2694 2695 /* Allocate memory for SLI-2 structures */ 2696 phba->slim2p.virt = dma_alloc_coherent(&phba->pcidev->dev, 2697 SLI2_SLIM_SIZE, 2698 &phba->slim2p.phys, 2699 GFP_KERNEL); 2700 if (!phba->slim2p.virt) 2701 goto out_iounmap; 2702 2703 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); 2704 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 2705 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 2706 phba->IOCBs = (phba->slim2p.virt + 2707 offsetof(struct lpfc_sli2_slim, IOCBs)); 2708 2709 phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev, 2710 lpfc_sli_hbq_size(), 2711 &phba->hbqslimp.phys, 2712 GFP_KERNEL); 2713 if (!phba->hbqslimp.virt) 2714 goto out_free_slim; 2715 2716 hbq_count = lpfc_sli_hbq_count(); 2717 ptr = phba->hbqslimp.virt; 2718 for (i = 0; i < hbq_count; ++i) { 2719 phba->hbqs[i].hbq_virt = ptr; 2720 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 2721 ptr += (lpfc_hbq_defs[i]->entry_count * 2722 sizeof(struct lpfc_hbq_entry)); 2723 } 2724 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 2725 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 2726 2727 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 2728 2729 INIT_LIST_HEAD(&phba->hbqbuf_in_list); 2730 2731 /* Initialize the SLI Layer to run with lpfc HBAs. 
*/ 2732 lpfc_sli_setup(phba); 2733 lpfc_sli_queue_setup(phba); 2734 2735 retval = lpfc_mem_alloc(phba); 2736 if (retval) { 2737 error = retval; 2738 goto out_free_hbqslimp; 2739 } 2740 2741 /* Initialize and populate the iocb list per host. */ 2742 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 2743 for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) { 2744 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 2745 if (iocbq_entry == NULL) { 2746 printk(KERN_ERR "%s: only allocated %d iocbs of " 2747 "expected %d count. Unloading driver.\n", 2748 __func__, i, LPFC_IOCB_LIST_CNT); 2749 error = -ENOMEM; 2750 goto out_free_iocbq; 2751 } 2752 2753 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 2754 if (iotag == 0) { 2755 kfree (iocbq_entry); 2756 printk(KERN_ERR "%s: failed to allocate IOTAG. " 2757 "Unloading driver.\n", 2758 __func__); 2759 error = -ENOMEM; 2760 goto out_free_iocbq; 2761 } 2762 2763 spin_lock_irq(&phba->hbalock); 2764 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 2765 phba->total_iocbq_bufs++; 2766 spin_unlock_irq(&phba->hbalock); 2767 } 2768 2769 /* Initialize HBA structure */ 2770 phba->fc_edtov = FF_DEF_EDTOV; 2771 phba->fc_ratov = FF_DEF_RATOV; 2772 phba->fc_altov = FF_DEF_ALTOV; 2773 phba->fc_arbtov = FF_DEF_ARBTOV; 2774 2775 INIT_LIST_HEAD(&phba->work_list); 2776 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 2777 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 2778 2779 /* Initialize the wait queue head for the kernel thread */ 2780 init_waitqueue_head(&phba->work_waitq); 2781 2782 /* Startup the kernel thread for this host adapter. */ 2783 phba->worker_thread = kthread_run(lpfc_do_work, phba, 2784 "lpfc_worker_%d", phba->brd_no); 2785 if (IS_ERR(phba->worker_thread)) { 2786 error = PTR_ERR(phba->worker_thread); 2787 goto out_free_iocbq; 2788 } 2789 2790 /* Initialize the list of scsi buffers used by driver for scsi IO. 
*/ 2791 spin_lock_init(&phba->scsi_buf_list_lock); 2792 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); 2793 2794 /* Initialize list of fabric iocbs */ 2795 INIT_LIST_HEAD(&phba->fabric_iocb_list); 2796 2797 /* Initialize list to save ELS buffers */ 2798 INIT_LIST_HEAD(&phba->elsbuf); 2799 2800 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 2801 if (!vport) 2802 goto out_kthread_stop; 2803 2804 shost = lpfc_shost_from_vport(vport); 2805 phba->pport = vport; 2806 lpfc_debugfs_initialize(vport); 2807 2808 pci_set_drvdata(pdev, shost); 2809 2810 phba->MBslimaddr = phba->slim_memmap_p; 2811 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 2812 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 2813 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 2814 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 2815 2816 /* Configure sysfs attributes */ 2817 if (lpfc_alloc_sysfs_attr(vport)) { 2818 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2819 "1476 Failed to allocate sysfs attr\n"); 2820 error = -ENOMEM; 2821 goto out_destroy_port; 2822 } 2823 2824 cfg_mode = phba->cfg_use_msi; 2825 while (true) { 2826 /* Configure and enable interrupt */ 2827 intr_mode = lpfc_enable_intr(phba, cfg_mode); 2828 if (intr_mode == LPFC_INTR_ERROR) { 2829 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2830 "0426 Failed to enable interrupt.\n"); 2831 goto out_free_sysfs_attr; 2832 } 2833 /* HBA SLI setup */ 2834 if (lpfc_sli_hba_setup(phba)) { 2835 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2836 "1477 Failed to set up hba\n"); 2837 error = -ENODEV; 2838 goto out_remove_device; 2839 } 2840 2841 /* Wait 50ms for the interrupts of previous mailbox commands */ 2842 msleep(50); 2843 /* Check active interrupts received */ 2844 if (phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 2845 /* Log the current active interrupt mode */ 2846 phba->intr_mode = intr_mode; 2847 lpfc_log_intr_mode(phba, intr_mode); 2848 break; 2849 } else { 2850 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2851 "0451 Configure interrupt mode (%d) " 2852 "failed active interrupt test.\n", 2853 intr_mode); 2854 if (intr_mode == 0) { 2855 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2856 "0479 Failed to enable " 2857 "interrupt.\n"); 2858 error = -ENODEV; 2859 goto out_remove_device; 2860 } 2861 /* Stop HBA SLI setups */ 2862 lpfc_stop_port(phba); 2863 /* Disable the current interrupt mode */ 2864 lpfc_disable_intr(phba); 2865 /* Try next level of interrupt mode */ 2866 cfg_mode = --intr_mode; 2867 } 2868 } 2869 2870 /* 2871 * hba setup may have changed the hba_queue_depth so we need to adjust 2872 * the value of can_queue. 
2873 */ 2874 shost->can_queue = phba->cfg_hba_queue_depth - 10; 2875 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 2876 2877 if (lpfc_prot_mask && lpfc_prot_guard) { 2878 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2879 "1478 Registering BlockGuard with the " 2880 "SCSI layer\n"); 2881 2882 scsi_host_set_prot(shost, lpfc_prot_mask); 2883 scsi_host_set_guard(shost, lpfc_prot_guard); 2884 } 2885 } 2886 2887 if (!_dump_buf_data) { 2888 int pagecnt = 10; 2889 while (pagecnt) { 2890 spin_lock_init(&_dump_buf_lock); 2891 _dump_buf_data = 2892 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 2893 if (_dump_buf_data) { 2894 printk(KERN_ERR "BLKGRD allocated %d pages for " 2895 "_dump_buf_data at 0x%p\n", 2896 (1 << pagecnt), _dump_buf_data); 2897 _dump_buf_data_order = pagecnt; 2898 memset(_dump_buf_data, 0, ((1 << PAGE_SHIFT) 2899 << pagecnt)); 2900 break; 2901 } else { 2902 --pagecnt; 2903 } 2904 2905 } 2906 2907 if (!_dump_buf_data_order) 2908 printk(KERN_ERR "BLKGRD ERROR unable to allocate " 2909 "memory for hexdump\n"); 2910 2911 } else { 2912 printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p" 2913 "\n", _dump_buf_data); 2914 } 2915 2916 2917 if (!_dump_buf_dif) { 2918 int pagecnt = 10; 2919 while (pagecnt) { 2920 _dump_buf_dif = 2921 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 2922 if (_dump_buf_dif) { 2923 printk(KERN_ERR "BLKGRD allocated %d pages for " 2924 "_dump_buf_dif at 0x%p\n", 2925 (1 << pagecnt), _dump_buf_dif); 2926 _dump_buf_dif_order = pagecnt; 2927 memset(_dump_buf_dif, 0, ((1 << PAGE_SHIFT) 2928 << pagecnt)); 2929 break; 2930 } else { 2931 --pagecnt; 2932 } 2933 2934 } 2935 2936 if (!_dump_buf_dif_order) 2937 printk(KERN_ERR "BLKGRD ERROR unable to allocate " 2938 "memory for hexdump\n"); 2939 2940 } else { 2941 printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n", 2942 _dump_buf_dif); 2943 } 2944 2945 lpfc_host_attrib_init(shost); 2946 2947 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 2948 spin_lock_irq(shost->host_lock); 2949 lpfc_poll_start_timer(phba); 2950 spin_unlock_irq(shost->host_lock); 2951 } 2952 2953 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2954 "0428 Perform SCSI scan\n"); 2955 /* Send board arrival event to upper layer */ 2956 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 2957 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 2958 fc_host_post_vendor_event(shost, fc_get_event_number(), 2959 sizeof(adapter_event), 2960 (char *) &adapter_event, 2961 LPFC_NL_VENDOR_ID); 2962 2963 return 0; 2964 2965 out_remove_device: 2966 spin_lock_irq(shost->host_lock); 2967 vport->load_flag |= FC_UNLOADING; 2968 spin_unlock_irq(shost->host_lock); 2969 lpfc_stop_phba_timers(phba); 2970 phba->pport->work_port_events = 0; 2971 lpfc_disable_intr(phba); 2972 lpfc_sli_hba_down(phba); 2973 lpfc_sli_brdrestart(phba); 2974 out_free_sysfs_attr: 2975 lpfc_free_sysfs_attr(vport); 2976 out_destroy_port: 2977 destroy_port(vport); 2978 out_kthread_stop: 2979 kthread_stop(phba->worker_thread); 2980 out_free_iocbq: 2981 list_for_each_entry_safe(iocbq_entry, iocbq_next, 2982 &phba->lpfc_iocb_list, list) { 2983 kfree(iocbq_entry); 2984 phba->total_iocbq_bufs--; 2985 } 2986 lpfc_mem_free(phba); 2987 out_free_hbqslimp: 2988 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 2989 phba->hbqslimp.virt, phba->hbqslimp.phys); 2990 out_free_slim: 2991 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 2992 phba->slim2p.virt, phba->slim2p.phys); 2993 out_iounmap: 2994 iounmap(phba->ctrl_regs_memmap_p); 2995 out_iounmap_slim: 2996 iounmap(phba->slim_memmap_p); 2997 out_idr_remove: 
2998 idr_remove(&lpfc_hba_index, phba->brd_no); 2999 out_free_phba: 3000 kfree(phba); 3001 out_release_regions: 3002 pci_release_selected_regions(pdev, bars); 3003 out_disable_device: 3004 pci_disable_device(pdev); 3005 out: 3006 pci_set_drvdata(pdev, NULL); 3007 if (shost) 3008 scsi_host_put(shost); 3009 return error; 3010 } 3011 3012 /** 3013 * lpfc_pci_remove_one: lpfc PCI func to unregister device from PCI subsystem. 3014 * @pdev: pointer to PCI device 3015 * 3016 * This routine is to be registered to the kernel's PCI subsystem. When an 3017 * Emulex HBA is removed from the PCI bus, it performs all the necessary cleanup 3018 * for the HBA device to be removed from the PCI subsystem properly. 3019 **/ 3020 static void __devexit 3021 lpfc_pci_remove_one(struct pci_dev *pdev) 3022 { 3023 struct Scsi_Host *shost = pci_get_drvdata(pdev); 3024 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3025 struct lpfc_vport **vports; 3026 struct lpfc_hba *phba = vport->phba; 3027 int i; 3028 int bars = pci_select_bars(pdev, IORESOURCE_MEM); 3029 3030 spin_lock_irq(&phba->hbalock); 3031 vport->load_flag |= FC_UNLOADING; 3032 spin_unlock_irq(&phba->hbalock); 3033 3034 lpfc_free_sysfs_attr(vport); 3035 3036 kthread_stop(phba->worker_thread); 3037 3038 /* Release all the vports against this physical port */ 3039 vports = lpfc_create_vport_work_array(phba); 3040 if (vports != NULL) 3041 for (i = 1; i <= phba->max_vpi && vports[i] != NULL; i++) 3042 fc_vport_terminate(vports[i]->fc_vport); 3043 lpfc_destroy_vport_work_array(phba, vports); 3044 3045 /* Remove FC host and then SCSI host with the physical port */ 3046 fc_remove_host(shost); 3047 scsi_remove_host(shost); 3048 lpfc_cleanup(vport); 3049 3050 /* 3051 * Bring down the SLI Layer. This step disables all interrupts, 3052 * clears the rings, discards all mailbox commands, and resets 3053 * the HBA. 3054 */ 3055 lpfc_sli_hba_down(phba); 3056 lpfc_sli_brdrestart(phba); 3057 3058 lpfc_stop_phba_timers(phba); 3059 spin_lock_irq(&phba->hbalock); 3060 list_del_init(&vport->listentry); 3061 spin_unlock_irq(&phba->hbalock); 3062 3063 lpfc_debugfs_terminate(vport); 3064 3065 /* Disable interrupt */ 3066 lpfc_disable_intr(phba); 3067 3068 pci_set_drvdata(pdev, NULL); 3069 scsi_host_put(shost); 3070 3071 /* 3072 * Call scsi_free before mem_free since scsi bufs are released to their 3073 * corresponding pools here. 3074 */ 3075 lpfc_scsi_free(phba); 3076 lpfc_mem_free(phba); 3077 3078 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 3079 phba->hbqslimp.virt, phba->hbqslimp.phys); 3080 3081 /* Free resources associated with SLI2 interface */ 3082 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 3083 phba->slim2p.virt, phba->slim2p.phys); 3084 3085 /* unmap adapter SLIM and Control Registers */ 3086 iounmap(phba->ctrl_regs_memmap_p); 3087 iounmap(phba->slim_memmap_p); 3088 3089 idr_remove(&lpfc_hba_index, phba->brd_no); 3090 3091 kfree(phba); 3092 3093 pci_release_selected_regions(pdev, bars); 3094 pci_disable_device(pdev); 3095 } 3096 3097 /** 3098 * lpfc_pci_suspend_one: lpfc PCI func to suspend device for power management. 3099 * @pdev: pointer to PCI device 3100 * @msg: power management message 3101 * 3102 * This routine is to be registered to the kernel's PCI subsystem to support 3103 * system Power Management (PM). When PM invokes this method, it quiesces the 3104 * device by stopping the driver's worker thread for the device, turning off 3105 * the device's interrupt and DMA, and bringing the device offline.
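* The quiesce sequence below is: lpfc_offline_prep(), lpfc_offline(), stopping the worker thread, lpfc_disable_intr(), then pci_save_state() and entry into PCI_D3hot.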
Note that the 3106 * driver implements only the minimum PM requirements for a power-aware 3107 * driver's suspend/resume support: all possible PM messages (SUSPEND, 3108 * HIBERNATE, FREEZE) to the suspend() method call are treated as SUSPEND, 3109 * and the driver fully reinitializes its device during the resume() method 3110 * call. The driver therefore sets the device to the PCI_D3hot state in PCI 3111 * config space instead of setting it according to the @msg provided by the PM. 3112 * 3113 * Return code 3114 * 0 - driver suspended the device 3115 * Error otherwise 3116 **/ 3117 static int 3118 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) 3119 { 3120 struct Scsi_Host *shost = pci_get_drvdata(pdev); 3121 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3122 3123 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3124 "0473 PCI device Power Management suspend.\n"); 3125 3126 /* Bring down the device */ 3127 lpfc_offline_prep(phba); 3128 lpfc_offline(phba); 3129 kthread_stop(phba->worker_thread); 3130 3131 /* Disable interrupt from device */ 3132 lpfc_disable_intr(phba); 3133 3134 /* Save device state to PCI config space */ 3135 pci_save_state(pdev); 3136 pci_set_power_state(pdev, PCI_D3hot); 3137 3138 return 0; 3139 } 3140 3141 /** 3142 * lpfc_pci_resume_one: lpfc PCI func to resume device for power management. 3143 * @pdev: pointer to PCI device 3144 * 3145 * This routine is to be registered to the kernel's PCI subsystem to support 3146 * system Power Management (PM). When PM invokes this method, it restores 3147 * the device's PCI config space state and fully reinitializes the device 3148 * and brings it online. Note that the driver implements only the minimum PM 3149 * requirements for a power-aware driver's suspend/resume support: all 3150 * possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 3151 * method call are treated as SUSPEND, and the driver fully reinitializes 3152 * its device during the resume() method call. The device is therefore set 3153 * to PCI_D0 directly in PCI config space before restoring the state. 3154 * 3155 * Return code 3156 * 0 - driver resumed the device 3157 * Error otherwise 3158 **/ 3159 static int 3160 lpfc_pci_resume_one(struct pci_dev *pdev) 3161 { 3162 struct Scsi_Host *shost = pci_get_drvdata(pdev); 3163 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3164 uint32_t intr_mode; 3165 int error; 3166 3167 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3168 "0452 PCI device Power Management resume.\n"); 3169 3170 /* Restore device state from PCI config space */ 3171 pci_set_power_state(pdev, PCI_D0); 3172 pci_restore_state(pdev); 3173 if (pdev->is_busmaster) 3174 pci_set_master(pdev); 3175 3176 /* Startup the kernel thread for this host adapter.
*/ 3177 phba->worker_thread = kthread_run(lpfc_do_work, phba, 3178 "lpfc_worker_%d", phba->brd_no); 3179 if (IS_ERR(phba->worker_thread)) { 3180 error = PTR_ERR(phba->worker_thread); 3181 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3182 "0434 PM resume failed to start worker " 3183 "thread: error=x%x.\n", error); 3184 return error; 3185 } 3186 3187 /* Configure and enable interrupt */ 3188 intr_mode = lpfc_enable_intr(phba, phba->intr_mode); 3189 if (intr_mode == LPFC_INTR_ERROR) { 3190 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3191 "0430 PM resume Failed to enable interrupt\n"); 3192 return -EIO; 3193 } else 3194 phba->intr_mode = intr_mode; 3195 3196 /* Restart HBA and bring it online */ 3197 lpfc_sli_brdrestart(phba); 3198 lpfc_online(phba); 3199 3200 /* Log the current active interrupt mode */ 3201 lpfc_log_intr_mode(phba, phba->intr_mode); 3202 3203 return 0; 3204 } 3205 3206 /** 3207 * lpfc_io_error_detected: Driver method for handling PCI I/O error detected. 3208 * @pdev: pointer to PCI device. 3209 * @state: the current PCI connection state. 3210 * 3211 * This routine is registered to the PCI subsystem for error handling. This 3212 * function is called by the PCI subsystem after a PCI bus error affecting 3213 * this device has been detected. When this function is invoked, it needs 3214 * to stop all I/O and interrupt(s) to the device. Once that is 3215 * done, it will return PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to 3216 * perform proper recovery as desired. 3217 * 3218 * Return codes 3219 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 3220 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 3221 **/ 3222 static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev, 3223 pci_channel_state_t state) 3224 { 3225 struct Scsi_Host *shost = pci_get_drvdata(pdev); 3226 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3227 struct lpfc_sli *psli = &phba->sli; 3228 struct lpfc_sli_ring *pring; 3229 3230 if (state == pci_channel_io_perm_failure) { 3231 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3232 "0472 PCI channel I/O permanent failure\n"); 3233 /* Block all SCSI devices' I/Os on the host */ 3234 lpfc_scsi_dev_block(phba); 3235 /* Clean up all driver's outstanding SCSI I/Os */ 3236 lpfc_sli_flush_fcp_rings(phba); 3237 return PCI_ERS_RESULT_DISCONNECT; 3238 } 3239 3240 pci_disable_device(pdev); 3241 /* 3242 * There may be I/Os dropped by the firmware. 3243 * Abort the iocbs (I/Os) on the txcmplq and let the SCSI layer 3244 * retry them after re-establishing the link. 3245 */ 3246 pring = &psli->ring[psli->fcp_ring]; 3247 lpfc_sli_abort_iocb_ring(phba, pring); 3248 3249 /* Disable interrupt */ 3250 lpfc_disable_intr(phba); 3251 3252 /* Request a slot reset. */ 3253 return PCI_ERS_RESULT_NEED_RESET; 3254 } 3255 3256 /** 3257 * lpfc_io_slot_reset: Restart a PCI device from scratch. 3258 * @pdev: pointer to PCI device. 3259 * 3260 * This routine is registered to the PCI subsystem for error handling. This is 3261 * called after the PCI bus has been reset to restart the PCI card from scratch, 3262 * as if from a cold-boot. During the PCI subsystem error recovery, after the 3263 * driver returns PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform 3264 * proper error recovery and then call this routine before calling the .resume 3265 * method to recover the device. This function will initialize the HBA device 3266 * and enable its interrupt, but it will just put the HBA in the offline state 3267 * without passing any I/O traffic.
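* Once this routine returns PCI_ERS_RESULT_RECOVERED, the PCI subsystem is expected to invoke the .resume method (lpfc_io_resume below), which brings the HBA back online.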
3268 * 3269 * Return codes 3270 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 3271 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 3272 */ 3273 static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) 3274 { 3275 struct Scsi_Host *shost = pci_get_drvdata(pdev); 3276 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3277 struct lpfc_sli *psli = &phba->sli; 3278 uint32_t intr_mode; 3279 3280 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 3281 if (pci_enable_device_mem(pdev)) { 3282 printk(KERN_ERR "lpfc: Cannot re-enable " 3283 "PCI device after reset.\n"); 3284 return PCI_ERS_RESULT_DISCONNECT; 3285 } 3286 3287 pci_restore_state(pdev); 3288 if (pdev->is_busmaster) 3289 pci_set_master(pdev); 3290 3291 spin_lock_irq(&phba->hbalock); 3292 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 3293 spin_unlock_irq(&phba->hbalock); 3294 3295 /* Configure and enable interrupt */ 3296 intr_mode = lpfc_enable_intr(phba, phba->intr_mode); 3297 if (intr_mode == LPFC_INTR_ERROR) { 3298 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3299 "0427 Cannot re-enable interrupt after " 3300 "slot reset.\n"); 3301 return PCI_ERS_RESULT_DISCONNECT; 3302 } else 3303 phba->intr_mode = intr_mode; 3304 3305 /* Take device offline; this will perform cleanup */ 3306 lpfc_offline(phba); 3307 lpfc_sli_brdrestart(phba); 3308 3309 /* Log the current active interrupt mode */ 3310 lpfc_log_intr_mode(phba, phba->intr_mode); 3311 3312 return PCI_ERS_RESULT_RECOVERED; 3313 } 3314 3315 /** 3316 * lpfc_io_resume: Resume PCI I/O operation. 3317 * @pdev: pointer to PCI device 3318 * 3319 * This routine is registered to the PCI subsystem for error handling. It is 3320 * called when kernel error recovery tells the lpfc driver that it is ok to 3321 * resume normal PCI operation after PCI bus error recovery. After this call, 3322 * traffic can start to flow from this device again. 
3323 */ 3324 static void lpfc_io_resume(struct pci_dev *pdev) 3325 { 3326 struct Scsi_Host *shost = pci_get_drvdata(pdev); 3327 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 3328 3329 lpfc_online(phba); 3330 } 3331 3332 static struct pci_device_id lpfc_id_table[] = { 3333 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, 3334 PCI_ANY_ID, PCI_ANY_ID, }, 3335 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY, 3336 PCI_ANY_ID, PCI_ANY_ID, }, 3337 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR, 3338 PCI_ANY_ID, PCI_ANY_ID, }, 3339 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS, 3340 PCI_ANY_ID, PCI_ANY_ID, }, 3341 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR, 3342 PCI_ANY_ID, PCI_ANY_ID, }, 3343 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY, 3344 PCI_ANY_ID, PCI_ANY_ID, }, 3345 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY, 3346 PCI_ANY_ID, PCI_ANY_ID, }, 3347 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY, 3348 PCI_ANY_ID, PCI_ANY_ID, }, 3349 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY, 3350 PCI_ANY_ID, PCI_ANY_ID, }, 3351 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE, 3352 PCI_ANY_ID, PCI_ANY_ID, }, 3353 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP, 3354 PCI_ANY_ID, PCI_ANY_ID, }, 3355 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP, 3356 PCI_ANY_ID, PCI_ANY_ID, }, 3357 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS, 3358 PCI_ANY_ID, PCI_ANY_ID, }, 3359 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP, 3360 PCI_ANY_ID, PCI_ANY_ID, }, 3361 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP, 3362 PCI_ANY_ID, PCI_ANY_ID, }, 3363 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID, 3364 PCI_ANY_ID, PCI_ANY_ID, }, 3365 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB, 3366 PCI_ANY_ID, PCI_ANY_ID, }, 3367 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR, 3368 PCI_ANY_ID, PCI_ANY_ID, }, 3369 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET, 3370 PCI_ANY_ID, PCI_ANY_ID, }, 3371 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP, 3372 PCI_ANY_ID, PCI_ANY_ID, }, 3373 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP, 3374 PCI_ANY_ID, PCI_ANY_ID, }, 3375 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID, 3376 PCI_ANY_ID, PCI_ANY_ID, }, 3377 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB, 3378 PCI_ANY_ID, PCI_ANY_ID, }, 3379 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY, 3380 PCI_ANY_ID, PCI_ANY_ID, }, 3381 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101, 3382 PCI_ANY_ID, PCI_ANY_ID, }, 3383 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S, 3384 PCI_ANY_ID, PCI_ANY_ID, }, 3385 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S, 3386 PCI_ANY_ID, PCI_ANY_ID, }, 3387 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S, 3388 PCI_ANY_ID, PCI_ANY_ID, }, 3389 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT, 3390 PCI_ANY_ID, PCI_ANY_ID, }, 3391 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID, 3392 PCI_ANY_ID, PCI_ANY_ID, }, 3393 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB, 3394 PCI_ANY_ID, PCI_ANY_ID, }, 3395 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP, 3396 PCI_ANY_ID, PCI_ANY_ID, }, 3397 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP, 3398 PCI_ANY_ID, PCI_ANY_ID, }, 3399 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S, 3400 PCI_ANY_ID, PCI_ANY_ID, }, 3401 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF, 3402 PCI_ANY_ID, PCI_ANY_ID, }, 3403 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF, 3404 PCI_ANY_ID, PCI_ANY_ID, }, 3405 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, 3406 PCI_ANY_ID, PCI_ANY_ID, }, 3407 { 0 } 3408 }; 3409 3410 MODULE_DEVICE_TABLE(pci, lpfc_id_table); 3411 3412 static struct pci_error_handlers lpfc_err_handler = { 3413 .error_detected = lpfc_io_error_detected, 3414 
.slot_reset = lpfc_io_slot_reset, 3415 .resume = lpfc_io_resume, 3416 }; 3417 3418 static struct pci_driver lpfc_driver = { 3419 .name = LPFC_DRIVER_NAME, 3420 .id_table = lpfc_id_table, 3421 .probe = lpfc_pci_probe_one, 3422 .remove = __devexit_p(lpfc_pci_remove_one), 3423 .suspend = lpfc_pci_suspend_one, 3424 .resume = lpfc_pci_resume_one, 3425 .err_handler = &lpfc_err_handler, 3426 }; 3427 3428 /** 3429 * lpfc_init: lpfc module initialization routine. 3430 * 3431 * This routine is to be invoked when the lpfc module is loaded into the 3432 * kernel. The special kernel macro module_init() is used to indicate the 3433 * role of this routine to the kernel as lpfc module entry point. 3434 * 3435 * Return codes 3436 * 0 - successful 3437 * -ENOMEM - FC attach transport failed 3438 * all others - failed 3439 */ 3440 static int __init 3441 lpfc_init(void) 3442 { 3443 int error = 0; 3444 3445 printk(LPFC_MODULE_DESC "\n"); 3446 printk(LPFC_COPYRIGHT "\n"); 3447 3448 if (lpfc_enable_npiv) { 3449 lpfc_transport_functions.vport_create = lpfc_vport_create; 3450 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 3451 } 3452 lpfc_transport_template = 3453 fc_attach_transport(&lpfc_transport_functions); 3454 if (lpfc_transport_template == NULL) 3455 return -ENOMEM; 3456 if (lpfc_enable_npiv) { 3457 lpfc_vport_transport_template = 3458 fc_attach_transport(&lpfc_vport_transport_functions); 3459 if (lpfc_vport_transport_template == NULL) { 3460 fc_release_transport(lpfc_transport_template); 3461 return -ENOMEM; 3462 } 3463 } 3464 error = pci_register_driver(&lpfc_driver); 3465 if (error) { 3466 fc_release_transport(lpfc_transport_template); 3467 if (lpfc_enable_npiv) 3468 fc_release_transport(lpfc_vport_transport_template); 3469 } 3470 3471 return error; 3472 } 3473 3474 /** 3475 * lpfc_exit: lpfc module removal routine. 3476 * 3477 * This routine is invoked when the lpfc module is removed from the kernel. 3478 * The special kernel macro module_exit() is used to indicate the role of 3479 * this routine to the kernel as lpfc module exit point. 3480 */ 3481 static void __exit 3482 lpfc_exit(void) 3483 { 3484 pci_unregister_driver(&lpfc_driver); 3485 fc_release_transport(lpfc_transport_template); 3486 if (lpfc_enable_npiv) 3487 fc_release_transport(lpfc_vport_transport_template); 3488 if (_dump_buf_data) { 3489 printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_data " 3490 "at 0x%p\n", 3491 (1L << _dump_buf_data_order), _dump_buf_data); 3492 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order); 3493 } 3494 3495 if (_dump_buf_dif) { 3496 printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_dif " 3497 "at 0x%p\n", 3498 (1L << _dump_buf_dif_order), _dump_buf_dif); 3499 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); 3500 } 3501 } 3502 3503 module_init(lpfc_init); 3504 module_exit(lpfc_exit); 3505 MODULE_LICENSE("GPL"); 3506 MODULE_DESCRIPTION(LPFC_MODULE_DESC); 3507 MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com"); 3508 MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 3509