/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2010 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

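/*
 * Driver-wide (not per-HBA) scratch buffers used to capture the data and
 * DIF portions of an I/O for debugging; _dump_buf_lock serializes access
 * to them, and the *_order values record the page-allocation order of each
 * buffer. (Descriptive note: these appear to be consumed by the driver's
 * BlockGuard/T10-DIF debug paths elsewhere in the driver.)
 */
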
char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_create(struct lpfc_hba *);
static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static int lpfc_sli4_read_config(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_sgl_list(struct lpfc_hba *);
static int lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}


	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
						sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* Dump mem may return a zero when finished, or we got a
		 * mailbox error; either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

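/*
 * Illustrative call pattern (a sketch only, not copied from the SLI setup
 * code): the caller that drives CONFIG_PORT is expected to honor the
 * -ERESTART return by resetting the board and retrying, e.g.:
 *
 *	rc = lpfc_config_port_prep(phba);
 *	if (rc == -ERESTART)
 *		... reset the HBA (e.g. via lpfc_sli_brdrestart()) and retry ...
 *	else if (rc)
 *		... give up on bringing the port up ...
 */
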
/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's configuring asynchronous
 * event mailbox command to the device. If the mailbox command returns
 * successfully, it will set the internal async event support flag to 1;
 * otherwise, it will set the internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * the wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;

	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof (struct lpfc_name));

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

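	/*
	 * Note on the decode below: each byte of the IEEE portion of the
	 * WWNN yields two serial-number characters, one per 4-bit nibble.
	 * Nibble values 0-9 map to '0'-'9' (0x30 + j) and 10-15 map to
	 * 'a'-'f' (0x61 + j - 10), i.e. a lowercase hex encoding.
	 */
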
	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			(mb->un.varRdConfig.max_xri + 1) -
			lpfc_sli4_get_els_iocb_cnt(phba);

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_16G)
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G)
		&& !(phba->lmt & LMT_1Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G)
		&& !(phba->lmt & LMT_2Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G)
		&& !(phba->lmt & LMT_4Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G)
		&& !(phba->lmt & LMT_8Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G)
		&& !(phba->lmt & LMT_10Gb))
	    || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G)
		&& !(phba->lmt & LMT_16Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
				"1302 Invalid speed for this board: "
				"Reset link speed to auto: x%x\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	status = readl(phba->HCregaddr);
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
				KERN_ERR, LOG_INIT,
				"2599 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		lpfc_init_link(phba, pmb, phba->cfg_topology,
			       phba->cfg_link_speed);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		lpfc_set_loopback_flag(phba);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba,
				KERN_ERR, LOG_INIT,
				"0454 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);

			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */

			phba->link_state = LPFC_HBA_ERROR;
			if (rc != MBX_BUSY)
				mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

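/*
 * A note on mailbox buffer ownership in the routine above (and in the
 * link-control helpers that follow): a command issued with MBX_NOWAIT that
 * returns MBX_BUSY or MBX_SUCCESS is freed later by its completion handler
 * (e.g. lpfc_sli_def_mbox_cmpl), so the caller frees the LPFC_MBOXQ_t
 * itself only when the issue attempt fails outright or when the command
 * was polled (MBX_POLL).
 */
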
/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0498 Adapter failed to init, mbxCmd x%x "
			"INIT_LINK, mbxStatus x%x\n",
			mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

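/*
 * Illustrative use (a sketch, not taken from the driver): code that kept
 * the link down at probe time via the lpfc_suppress_link_up module
 * parameter could bring the link up later, in polled mode, with:
 *
 *	if (lpfc_hba_init_link(phba, MBX_POLL))
 *		... report the failure; link_state is LPFC_HBA_ERROR ...
 *
 * On success the routine resets cfg_suppress_link_up to
 * LPFC_INITIALIZE_LINK, so subsequent initializations start the link
 * normally.
 */
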
/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use in stopping the link.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2522 Adapter failed to issue DOWN_LINK"
			" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	int ret;
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;

	ret = lpfc_hba_down_post_s3(phba);
	if (ret)
		return ret;
	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list; it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);	/* required for lpfc_sgl_list and */
					/* scsi_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			 &phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			 &aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

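/*
 * The indirection above is the driver's SLI-revision jump table: during
 * setup the lpfc_hba_down_post pointer is bound to either
 * lpfc_hba_down_post_s3 or lpfc_hba_down_post_s4 to match the HBA, so the
 * common code stays revision-agnostic. The same pattern is used for
 * lpfc_handle_eratt further below.
 */
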
/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, an RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodic operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->hba_flag & HBA_RRQ_ACTIVE;
	if (!tmo_posted)
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up the
 * heart-beat timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds
 * and mark the heart-beat outstanding state. Once the mailbox command comes
 * back and no error condition is detected, the heart-beat mailbox command
 * timer is reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat
 * outstanding state is cleared for the next heart-beat. If the timer
 * expires with the heart-beat outstanding state set, the driver will put
 * the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	return;
}

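/*
 * Note on the timer arithmetic in the handler below: comparisons against
 * jiffies use time_after()/time_before_eq() so the checks stay correct
 * when the jiffies counter wraps. For example,
 *
 *	time_after(phba->last_completion_time +
 *		   LPFC_HB_MBOX_INTERVAL * HZ, jiffies)
 *
 * is true while the last I/O completion is still within the heart-beat
 * interval, in which case only the timer needs to be rearmed.
 */
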
/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fires and an HBA-timeout event is posted.
 * This handler performs any periodic operations needed for the device. If
 * such a periodic event has already been attended to either in the interrupt
 * handler or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If the lpfc heart-beat mailbox
 * command is configured and there is no heart-beat mailbox command
 * outstanding, a heart-beat mailbox is issued and the timer set properly.
 * Otherwise, if there has been a heart-beat mailbox command outstanding,
 * the HBA shall be put offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_rcv_seq_check_edtov(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
		jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
		else
			mod_timer(&phba->hb_tmofunc,
				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
					 struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
							GFP_KERNEL);
				if (!pmboxq) {
					mod_timer(&phba->hb_tmofunc,
						  jiffies +
						  HZ * LPFC_HB_MBOX_INTERVAL);
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
						MBX_NOWAIT);

				if (retval != MBX_BUSY &&
					retval != MBX_SUCCESS) {
					mempool_free(pmboxq,
							phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						  jiffies +
						  HZ * LPFC_HB_MBOX_INTERVAL);
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					"updated in %d ms\n",
					jiffies_to_msecs(jiffies
						- phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still "
					"outstanding: last compl time was "
					"%d ms.\n",
					jiffies_to_msecs(jiffies
						- phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				  jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
		}
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);


	/*
	 * Firmware stops when it triggers erratt. That could cause the I/Os
	 * dropped by the firmware. Error out the iocbs (I/Os) on the txcmplq
	 * and let the SCSI layer retry them after re-establishing the link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		phba->work_hs = readl(phba->HSregaddr);
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * the first write to the host attention register clears
	 * the host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

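/**
 * lpfc_board_errevt_to_mgmt - Post a board error event to the mgmt application
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts an FC_REG_BOARD_EVENT vendor event, with subcategory
 * LPFC_EVENT_PORTINTERR, through the FC transport so that management
 * applications listening for vendor-unique events learn of the board error.
 **/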
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggers erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error out the iocbs (I/Os) on the txcmplq and let the
		 * SCSI layer retry them after re-establishing the link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the
		 * offline path twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;
	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	/* For now, the actual action for SLI4 device handling is not
	 * specified yet; just treat it as an adapter hardware failure.
	 */
	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		portstat_reg.word0 =
			readl(phba->sli4_hba.u.if_type2.STATUSregaddr);

		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			/* TODO: Register for Overtemp async events. */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2889 Port Overtemperature event, "
					"taking port offline\n");
			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		if (bf_get(lpfc_sliport_status_rn, &portstat_reg)) {
			/*
			 * TODO: Attempt port recovery via a port reset.
			 * When fully implemented, the driver should
			 * attempt to recover the port here and return.
			 * For now, log an error and take the port offline.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2887 Port Error: Attempting "
					"Port Recovery\n");
		}
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
}

/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer in the lpfc_hba struct.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle an HBA host
 * attention link event.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		"0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}

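/*
 * Sketch of the VPD byte stream handled by lpfc_parse_vpd() below, as
 * implied by its parsing logic (two-byte lengths are little endian, field
 * lengths are a single byte):
 *
 *	0x82 len_lo len_hi <identifier string>
 *	0x90 len_lo len_hi               read-only fields, each keyed:
 *	     'S' 'N' len <serial number>
 *	     'V' '1' len <model description>
 *	     'V' '2' len <model name>
 *	     'V' '3' len <program type>
 *	     'V' '4' len <port>
 *	0x78                             end tag
 */
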
/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
				/* Look for Serial Number */
				if ((vpd[index] == 'S') &&
				    (vpd[index+1] == 'N')) {
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->SerialNumber[j++] =
							vpd[index++];
						if (j == 31)
							break;
					}
					phba->SerialNumber[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') &&
					   (vpd[index+1] == '1')) {
					phba->vpd_flag |= VPD_MODEL_DESC;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ModelDesc[j++] =
							vpd[index++];
						if (j == 255)
							break;
					}
					phba->ModelDesc[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') &&
					   (vpd[index+1] == '2')) {
					phba->vpd_flag |= VPD_MODEL_NAME;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ModelName[j++] =
							vpd[index++];
						if (j == 79)
							break;
					}
					phba->ModelName[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') &&
					   (vpd[index+1] == '3')) {
					phba->vpd_flag |= VPD_PROGRAM_TYPE;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->ProgramType[j++] =
							vpd[index++];
						if (j == 255)
							break;
					}
					phba->ProgramType[j] = 0;
					continue;
				} else if ((vpd[index] == 'V') &&
					   (vpd[index+1] == '4')) {
					phba->vpd_flag |= VPD_PORT;
					index += 2;
					i = vpd[index];
					index += 1;
					j = 0;
					Length -= (3+i);
					while (i--) {
						phba->Port[j++] = vpd[index++];
						if (j == 19)
							break;
					}
					phba->Port[j] = 0;
					continue;
				} else {
					index += 2;
					i = vpd[index];
					index += 1;
					index += i;
					Length -= (3 + i);
				}
			}
			finished = 0;
			break;
		case 0x78:
			finished = 1;
			break;
		default:
			index++;
			break;
		}
	}

	return 1;
}

/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves the HBA's description based on its registered PCI
 * device ID. The @descp passed into this function points to an array of
 * 256 chars. It shall be returned with the model name, maximum speed, and
 * the host bus type. The @mdp passed into this function points to an array
 * of 80 chars. When the function returns, the @mdp will be filled with the
 * model name.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else
		max_speed = 1;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP7000E", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP9000", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
Adapter"}; 1849 break; 1850 case PCI_DEVICE_ID_ZEPHYR_DCSP: 1851 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 1852 GE = 1; 1853 break; 1854 case PCI_DEVICE_ID_ZMID: 1855 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 1856 break; 1857 case PCI_DEVICE_ID_ZSMB: 1858 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 1859 break; 1860 case PCI_DEVICE_ID_LP101: 1861 m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"}; 1862 break; 1863 case PCI_DEVICE_ID_LP10000S: 1864 m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"}; 1865 break; 1866 case PCI_DEVICE_ID_LP11000S: 1867 m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"}; 1868 break; 1869 case PCI_DEVICE_ID_LPE11000S: 1870 m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"}; 1871 break; 1872 case PCI_DEVICE_ID_SAT: 1873 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 1874 break; 1875 case PCI_DEVICE_ID_SAT_MID: 1876 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 1877 break; 1878 case PCI_DEVICE_ID_SAT_SMB: 1879 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 1880 break; 1881 case PCI_DEVICE_ID_SAT_DCSP: 1882 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 1883 break; 1884 case PCI_DEVICE_ID_SAT_SCSP: 1885 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 1886 break; 1887 case PCI_DEVICE_ID_SAT_S: 1888 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 1889 break; 1890 case PCI_DEVICE_ID_HORNET: 1891 m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"}; 1892 GE = 1; 1893 break; 1894 case PCI_DEVICE_ID_PROTEUS_VF: 1895 m = (typeof(m)){"LPev12000", "PCIe IOV", 1896 "Fibre Channel Adapter"}; 1897 break; 1898 case PCI_DEVICE_ID_PROTEUS_PF: 1899 m = (typeof(m)){"LPev12000", "PCIe IOV", 1900 "Fibre Channel Adapter"}; 1901 break; 1902 case PCI_DEVICE_ID_PROTEUS_S: 1903 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 1904 "Fibre Channel Adapter"}; 1905 break; 1906 case PCI_DEVICE_ID_TIGERSHARK: 1907 oneConnect = 1; 1908 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 1909 break; 1910 case PCI_DEVICE_ID_TOMCAT: 1911 oneConnect = 1; 1912 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 1913 break; 1914 case PCI_DEVICE_ID_FALCON: 1915 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 1916 "EmulexSecure Fibre"}; 1917 break; 1918 case PCI_DEVICE_ID_BALIUS: 1919 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 1920 "Fibre Channel Adapter"}; 1921 break; 1922 case PCI_DEVICE_ID_LANCER_FC: 1923 oneConnect = 1; 1924 m = (typeof(m)){"Undefined", "PCIe", "Fibre Channel Adapter"}; 1925 break; 1926 case PCI_DEVICE_ID_LANCER_FCOE: 1927 oneConnect = 1; 1928 m = (typeof(m)){"Undefined", "PCIe", "FCoE"}; 1929 break; 1930 default: 1931 m = (typeof(m)){"Unknown", "", ""}; 1932 break; 1933 } 1934 1935 if (mdp && mdp[0] == '\0') 1936 snprintf(mdp, 79,"%s", m.name); 1937 /* oneConnect hba requires special processing, they are all initiators 1938 * and we put the port number on the end 1939 */ 1940 if (descp && descp[0] == '\0') { 1941 if (oneConnect) 1942 snprintf(descp, 255, 1943 "Emulex OneConnect %s, %s Initiator, Port %s", 1944 m.name, m.function, 1945 phba->Port); 1946 else 1947 snprintf(descp, 255, 1948 "Emulex %s %d%s %s %s", 1949 m.name, max_speed, (GE) ? "GE" : "Gb", 1950 m.bus, m.function); 1951 } 1952 } 1953 1954 /** 1955 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 1956 * @phba: pointer to lpfc hba data structure. 1957 * @pring: pointer to a IOCB ring. 
1958 * @cnt: the number of IOCBs to be posted to the IOCB ring. 1959 * 1960 * This routine posts a given number of IOCBs with the associated DMA buffer 1961 * descriptors specified by the cnt argument to the given IOCB ring. 1962 * 1963 * Return codes 1964 * The number of IOCBs NOT able to be posted to the IOCB ring. 1965 **/ 1966 int 1967 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 1968 { 1969 IOCB_t *icmd; 1970 struct lpfc_iocbq *iocb; 1971 struct lpfc_dmabuf *mp1, *mp2; 1972 1973 cnt += pring->missbufcnt; 1974 1975 /* While there are buffers to post */ 1976 while (cnt > 0) { 1977 /* Allocate buffer for command iocb */ 1978 iocb = lpfc_sli_get_iocbq(phba); 1979 if (iocb == NULL) { 1980 pring->missbufcnt = cnt; 1981 return cnt; 1982 } 1983 icmd = &iocb->iocb; 1984 1985 /* 2 buffers can be posted per command */ 1986 /* Allocate buffer to post */ 1987 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 1988 if (mp1) 1989 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 1990 if (!mp1 || !mp1->virt) { 1991 kfree(mp1); 1992 lpfc_sli_release_iocbq(phba, iocb); 1993 pring->missbufcnt = cnt; 1994 return cnt; 1995 } 1996 1997 INIT_LIST_HEAD(&mp1->list); 1998 /* Allocate buffer to post */ 1999 if (cnt > 1) { 2000 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2001 if (mp2) 2002 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 2003 &mp2->phys); 2004 if (!mp2 || !mp2->virt) { 2005 kfree(mp2); 2006 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2007 kfree(mp1); 2008 lpfc_sli_release_iocbq(phba, iocb); 2009 pring->missbufcnt = cnt; 2010 return cnt; 2011 } 2012 2013 INIT_LIST_HEAD(&mp2->list); 2014 } else { 2015 mp2 = NULL; 2016 } 2017 2018 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 2019 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 2020 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 2021 icmd->ulpBdeCount = 1; 2022 cnt--; 2023 if (mp2) { 2024 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 2025 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 2026 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 2027 cnt--; 2028 icmd->ulpBdeCount = 2; 2029 } 2030 2031 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 2032 icmd->ulpLe = 1; 2033 2034 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 2035 IOCB_ERROR) { 2036 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2037 kfree(mp1); 2038 cnt++; 2039 if (mp2) { 2040 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 2041 kfree(mp2); 2042 cnt++; 2043 } 2044 lpfc_sli_release_iocbq(phba, iocb); 2045 pring->missbufcnt = cnt; 2046 return cnt; 2047 } 2048 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 2049 if (mp2) 2050 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 2051 } 2052 pring->missbufcnt = 0; 2053 return 0; 2054 } 2055 2056 /** 2057 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 2058 * @phba: pointer to lpfc hba data structure. 2059 * 2060 * This routine posts initial receive IOCB buffers to the ELS ring. The 2061 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 2062 * set to 64 IOCBs. 
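 *
 * The buffers are posted through lpfc_post_buffer(), which chains up to
 * two DMA buffer descriptors per CMD_QUE_RING_BUF64_CN IOCB, so the 64
 * initial buffers are carried by 32 IOCBs when every allocation succeeds.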
 *
 * Return codes
 *   0 - success (currently always success)
 **/
static int
lpfc_post_rcv_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Ring 0, ELS / CT buffers */
	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
	/* Ring 2 - FCP no buffers needed */

	return 0;
}

#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))

/**
 * lpfc_sha_init - Set up initial array of hash table entries
 * @HashResultPointer: pointer to an array as hash table.
 *
 * This routine sets up the initial values to the array of hash table entries
 * for the LC HBAs.
 **/
static void
lpfc_sha_init(uint32_t * HashResultPointer)
{
	HashResultPointer[0] = 0x67452301;
	HashResultPointer[1] = 0xEFCDAB89;
	HashResultPointer[2] = 0x98BADCFE;
	HashResultPointer[3] = 0x10325476;
	HashResultPointer[4] = 0xC3D2E1F0;
}

/**
 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to a working hash table.
 *
 * This routine iterates an initial hash table pointed to by
 * @HashResultPointer with the values from the working hash table pointed to
 * by @HashWorkingPointer. The results are put back into the initial hash
 * table and returned through @HashResultPointer as the result hash table.
 **/
static void
lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
	int t;
	uint32_t TEMP;
	uint32_t A, B, C, D, E;
	t = 16;
	do {
		HashWorkingPointer[t] =
		    S(1,
		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t - 8] ^
		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
	} while (++t <= 79);
	t = 0;
	A = HashResultPointer[0];
	B = HashResultPointer[1];
	C = HashResultPointer[2];
	D = HashResultPointer[3];
	E = HashResultPointer[4];

	do {
		if (t < 20) {
			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
		} else if (t < 40) {
			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
		} else if (t < 60) {
			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
		} else {
			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
		}
		TEMP += S(5, A) + E + HashWorkingPointer[t];
		E = D;
		D = C;
		C = S(30, B);
		B = A;
		A = TEMP;
	} while (++t <= 79);

	HashResultPointer[0] += A;
	HashResultPointer[1] += B;
	HashResultPointer[2] += C;
	HashResultPointer[3] += D;
	HashResultPointer[4] += E;

}

/**
 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
 * @RandomChallenge: pointer to the entry of host challenge random number array.
 * @HashWorking: pointer to the entry of the working hash array.
 *
 * This routine calculates the working hash array referred to by @HashWorking
 * from the challenge random numbers associated with the host, referred to by
 * @RandomChallenge. The result is put into the entry of the working hash
 * array and returned by reference through @HashWorking.
 **/
static void
lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
	*HashWorking = (*RandomChallenge ^ *HashWorking);
}

/**
 * lpfc_hba_init - Perform special handling for LC HBA initialization
 * @phba: pointer to lpfc hba data structure.
 * @hbainit: pointer to an array of unsigned 32-bit integers.
 *
 * This routine performs the special handling for LC HBA initialization.
 **/
void
lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
{
	int t;
	uint32_t *HashWorking;
	uint32_t *pwwnn = (uint32_t *) phba->wwnn;

	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
	if (!HashWorking)
		return;

	HashWorking[0] = HashWorking[78] = *pwwnn++;
	HashWorking[1] = HashWorking[79] = *pwwnn;

	for (t = 0; t < 7; t++)
		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);

	lpfc_sha_init(hbainit);
	lpfc_sha_iterate(hbainit, HashWorking);
	kfree(HashWorking);
}

/**
 * lpfc_cleanup - Performs vport cleanups before deleting a vport
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine performs the necessary cleanups before deleting the @vport.
 * It invokes the discovery state machine to perform necessary state
 * transitions and to release the ndlps associated with the @vport. Note,
 * the physical port is treated as @vport 0.
 **/
void
lpfc_cleanup(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int i = 0;

	if (phba->link_state > LPFC_LINK_DOWN)
		lpfc_port_link_failure(vport);

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp)) {
			ndlp = lpfc_enable_node(vport, ndlp,
						NLP_STE_UNUSED_NODE);
			if (!ndlp)
				continue;
			spin_lock_irq(&phba->ndlp_lock);
			NLP_SET_FREE_REQ(ndlp);
			spin_unlock_irq(&phba->ndlp_lock);
			/* Trigger the release of the ndlp memory */
			lpfc_nlp_put(ndlp);
			continue;
		}
		spin_lock_irq(&phba->ndlp_lock);
		if (NLP_CHK_FREE_REQ(ndlp)) {
			/* The ndlp should not be in memory free mode already */
			spin_unlock_irq(&phba->ndlp_lock);
			continue;
		} else
			/* Indicate request for freeing ndlp memory */
			NLP_SET_FREE_REQ(ndlp);
		spin_unlock_irq(&phba->ndlp_lock);

		if (vport->port_type != LPFC_PHYSICAL_PORT &&
		    ndlp->nlp_DID == Fabric_DID) {
			/* Just free up ndlp with Fabric_DID for vports */
			lpfc_nlp_put(ndlp);
			continue;
		}

		if (ndlp->nlp_type & NLP_FABRIC)
			lpfc_disc_state_machine(vport, ndlp, NULL,
						NLP_EVT_DEVICE_RECOVERY);

		lpfc_disc_state_machine(vport, ndlp, NULL,
					NLP_EVT_DEVICE_RM);

	}

	/* At this point, ALL ndlps should be gone
	 * because of the previous NLP_EVT_DEVICE_RM.
	 * Let's wait for this to happen, if needed.
	 */
	while (!list_empty(&vport->fc_nodes)) {
		if (i++ > 3000) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					"0233 Nodelist not empty\n");
			list_for_each_entry_safe(ndlp, next_ndlp,
						&vport->fc_nodes, nlp_listp) {
				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
						LOG_NODE,
						"0282 did:x%x ndlp:x%p "
						"usgmap:x%x refcnt:%d\n",
						ndlp->nlp_DID, (void *)ndlp,
						ndlp->nlp_usg_map,
						atomic_read(
							&ndlp->kref.refcount));
			}
			break;
		}

		/* Wait for any activity on ndlps to settle */
		msleep(10);
	}
}

/**
 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine stops all the timers associated with a @vport. This function
 * is invoked before disabling or deleting a @vport. Note that the physical
 * port is treated as @vport 0.
 **/
void
lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
	del_timer_sync(&vport->els_tmofunc);
	del_timer_sync(&vport->fc_fdmitmo);
	lpfc_can_disctmo(vport);
	return;
}

/**
 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
 * caller of this routine should already hold the host lock.
 **/
void
__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	/* Clear pending FCF rediscovery wait flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;

	/* Now, try to stop the timer */
	del_timer(&phba->fcf.redisc_wait);
}

/**
 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
 * checks whether the FCF rediscovery wait timer is pending with the host
 * lock held before proceeding with disabling the timer and clearing the
 * wait timer pending flag.
 **/
void
lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		/* FCF rediscovery timer already fired or stopped */
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
	/* Clear failover in progress flags */
	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops all the timers associated with an HBA. This function is
 * invoked before either putting an HBA offline or unloading the driver.
 **/
void
lpfc_stop_hba_timers(struct lpfc_hba *phba)
{
	lpfc_stop_vport_timers(phba->pport);
	del_timer_sync(&phba->sli.mbox_tmo);
	del_timer_sync(&phba->fabric_block_timer);
	del_timer_sync(&phba->eratt_poll);
	del_timer_sync(&phba->hb_tmofunc);
	phba->hb_outstanding = 0;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		/* Stop any LightPulse device specific driver timers */
		del_timer_sync(&phba->fcp_poll_timer);
		break;
	case LPFC_PCI_DEV_OC:
		/* Stop any OneConnect device specific driver timers */
		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0297 Invalid device group (x%x)\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

/**
 * lpfc_block_mgmt_io - Mark an HBA's management interface as blocked
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine marks an HBA's management interface as blocked. Once the
 * HBA's management interface is marked as blocked, all user space access to
 * the HBA, whether from the sysfs interface or the libdfc interface, will
 * be blocked. The HBA is set to block the management interface when the
 * driver prepares the HBA interface for online or offline.
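 *
 * Callers bracket the HBA state change with the block/unblock pair; in
 * sketch form (this is the pattern lpfc_online() below follows):
 *
 *	lpfc_block_mgmt_io(phba);
 *	... set up the SLI queues and bring the HBA up or down ...
 *	lpfc_unblock_mgmt_io(phba);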
 **/
static void
lpfc_block_mgmt_io(struct lpfc_hba * phba)
{
	unsigned long iflag;
	uint8_t actcmd = MBX_HEARTBEAT;
	unsigned long timeout;


	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
	if (phba->sli.mbox_active)
		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	/* Determine how long we might wait for the active mailbox
	 * command to be gracefully completed by firmware.
	 */
	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
				   jiffies;
	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2813 Mgmt IO is Blocked %x "
					"- mbox cmd %x still active\n",
					phba->sli.sli_flag, actcmd);
			break;
		}
	}
}

/**
 * lpfc_online - Initialize and bring an HBA online
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the HBA and brings an HBA online. During this
 * process, the management interface is blocked to prevent user space access
 * to the HBA interfering with the driver initialization.
 *
 * Return codes
 *   0 - successful
 *   1 - failed
 **/
int
lpfc_online(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct lpfc_vport **vports;
	int i;

	if (!phba)
		return 0;
	vport = phba->pport;

	if (!(vport->fc_flag & FC_OFFLINE_MODE))
		return 0;

	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0458 Bring Adapter online\n");

	lpfc_block_mgmt_io(phba);

	if (!lpfc_sli_queue_setup(phba)) {
		lpfc_unblock_mgmt_io(phba);
		return 1;
	}

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
	} else {
		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			struct Scsi_Host *shost;
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			if (phba->sli_rev == LPFC_SLI_REV4)
				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	lpfc_unblock_mgmt_io(phba);
	return 0;
}

/**
 * lpfc_unblock_mgmt_io - Mark an HBA's management interface to be not blocked
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine marks an HBA's management interface as not blocked. Once the
 * HBA's management interface is marked as not blocked, all user space
 * access to the HBA, whether from the sysfs interface or the libdfc
 * interface, will be allowed. The HBA is set to block the management
 * interface when the driver prepares the HBA interface for online or
 * offline and then set to unblock the management interface afterwards.
2497 **/ 2498 void 2499 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 2500 { 2501 unsigned long iflag; 2502 2503 spin_lock_irqsave(&phba->hbalock, iflag); 2504 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 2505 spin_unlock_irqrestore(&phba->hbalock, iflag); 2506 } 2507 2508 /** 2509 * lpfc_offline_prep - Prepare a HBA to be brought offline 2510 * @phba: pointer to lpfc hba data structure. 2511 * 2512 * This routine is invoked to prepare a HBA to be brought offline. It performs 2513 * unregistration login to all the nodes on all vports and flushes the mailbox 2514 * queue to make it ready to be brought offline. 2515 **/ 2516 void 2517 lpfc_offline_prep(struct lpfc_hba * phba) 2518 { 2519 struct lpfc_vport *vport = phba->pport; 2520 struct lpfc_nodelist *ndlp, *next_ndlp; 2521 struct lpfc_vport **vports; 2522 struct Scsi_Host *shost; 2523 int i; 2524 2525 if (vport->fc_flag & FC_OFFLINE_MODE) 2526 return; 2527 2528 lpfc_block_mgmt_io(phba); 2529 2530 lpfc_linkdown(phba); 2531 2532 /* Issue an unreg_login to all nodes on all vports */ 2533 vports = lpfc_create_vport_work_array(phba); 2534 if (vports != NULL) { 2535 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2536 if (vports[i]->load_flag & FC_UNLOADING) 2537 continue; 2538 shost = lpfc_shost_from_vport(vports[i]); 2539 spin_lock_irq(shost->host_lock); 2540 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 2541 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2542 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 2543 spin_unlock_irq(shost->host_lock); 2544 2545 shost = lpfc_shost_from_vport(vports[i]); 2546 list_for_each_entry_safe(ndlp, next_ndlp, 2547 &vports[i]->fc_nodes, 2548 nlp_listp) { 2549 if (!NLP_CHK_NODE_ACT(ndlp)) 2550 continue; 2551 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 2552 continue; 2553 if (ndlp->nlp_type & NLP_FABRIC) { 2554 lpfc_disc_state_machine(vports[i], ndlp, 2555 NULL, NLP_EVT_DEVICE_RECOVERY); 2556 lpfc_disc_state_machine(vports[i], ndlp, 2557 NULL, NLP_EVT_DEVICE_RM); 2558 } 2559 spin_lock_irq(shost->host_lock); 2560 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2561 spin_unlock_irq(shost->host_lock); 2562 lpfc_unreg_rpi(vports[i], ndlp); 2563 } 2564 } 2565 } 2566 lpfc_destroy_vport_work_array(phba, vports); 2567 2568 lpfc_sli_mbox_sys_shutdown(phba); 2569 } 2570 2571 /** 2572 * lpfc_offline - Bring a HBA offline 2573 * @phba: pointer to lpfc hba data structure. 2574 * 2575 * This routine actually brings a HBA offline. It stops all the timers 2576 * associated with the HBA, brings down the SLI layer, and eventually 2577 * marks the HBA as in offline state for the upper layer protocol. 2578 **/ 2579 void 2580 lpfc_offline(struct lpfc_hba *phba) 2581 { 2582 struct Scsi_Host *shost; 2583 struct lpfc_vport **vports; 2584 int i; 2585 2586 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 2587 return; 2588 2589 /* stop port and all timers associated with this hba */ 2590 lpfc_stop_port(phba); 2591 vports = lpfc_create_vport_work_array(phba); 2592 if (vports != NULL) 2593 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 2594 lpfc_stop_vport_timers(vports[i]); 2595 lpfc_destroy_vport_work_array(phba, vports); 2596 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2597 "0460 Bring Adapter offline\n"); 2598 /* Bring down the SLI Layer and cleanup. The HBA is offline 2599 now. 
*/ 2600 lpfc_sli_hba_down(phba); 2601 spin_lock_irq(&phba->hbalock); 2602 phba->work_ha = 0; 2603 spin_unlock_irq(&phba->hbalock); 2604 vports = lpfc_create_vport_work_array(phba); 2605 if (vports != NULL) 2606 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2607 shost = lpfc_shost_from_vport(vports[i]); 2608 spin_lock_irq(shost->host_lock); 2609 vports[i]->work_port_events = 0; 2610 vports[i]->fc_flag |= FC_OFFLINE_MODE; 2611 spin_unlock_irq(shost->host_lock); 2612 } 2613 lpfc_destroy_vport_work_array(phba, vports); 2614 } 2615 2616 /** 2617 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 2618 * @phba: pointer to lpfc hba data structure. 2619 * 2620 * This routine is to free all the SCSI buffers and IOCBs from the driver 2621 * list back to kernel. It is called from lpfc_pci_remove_one to free 2622 * the internal resources before the device is removed from the system. 2623 * 2624 * Return codes 2625 * 0 - successful (for now, it always returns 0) 2626 **/ 2627 static int 2628 lpfc_scsi_free(struct lpfc_hba *phba) 2629 { 2630 struct lpfc_scsi_buf *sb, *sb_next; 2631 struct lpfc_iocbq *io, *io_next; 2632 2633 spin_lock_irq(&phba->hbalock); 2634 /* Release all the lpfc_scsi_bufs maintained by this host. */ 2635 spin_lock(&phba->scsi_buf_list_lock); 2636 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 2637 list_del(&sb->list); 2638 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 2639 sb->dma_handle); 2640 kfree(sb); 2641 phba->total_scsi_bufs--; 2642 } 2643 spin_unlock(&phba->scsi_buf_list_lock); 2644 2645 /* Release all the lpfc_iocbq entries maintained by this host. */ 2646 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 2647 list_del(&io->list); 2648 kfree(io); 2649 phba->total_iocbq_bufs--; 2650 } 2651 spin_unlock_irq(&phba->hbalock); 2652 return 0; 2653 } 2654 2655 /** 2656 * lpfc_create_port - Create an FC port 2657 * @phba: pointer to lpfc hba data structure. 2658 * @instance: a unique integer ID to this FC port. 2659 * @dev: pointer to the device data structure. 2660 * 2661 * This routine creates a FC port for the upper layer protocol. The FC port 2662 * can be created on top of either a physical port or a virtual port provided 2663 * by the HBA. This routine also allocates a SCSI host data structure (shost) 2664 * and associates the FC port created before adding the shost into the SCSI 2665 * layer. 2666 * 2667 * Return codes 2668 * @vport - pointer to the virtual N_Port data structure. 2669 * NULL - port create failed. 
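 *
 * Note that @dev selects the port type: &phba->pcidev->dev creates the
 * physical port (LPFC_PHYSICAL_PORT) with the base transport template,
 * while any other device creates an NPIV virtual port (LPFC_NPIV_PORT)
 * with the vport transport template.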
2670 **/ 2671 struct lpfc_vport * 2672 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 2673 { 2674 struct lpfc_vport *vport; 2675 struct Scsi_Host *shost; 2676 int error = 0; 2677 2678 if (dev != &phba->pcidev->dev) 2679 shost = scsi_host_alloc(&lpfc_vport_template, 2680 sizeof(struct lpfc_vport)); 2681 else 2682 shost = scsi_host_alloc(&lpfc_template, 2683 sizeof(struct lpfc_vport)); 2684 if (!shost) 2685 goto out; 2686 2687 vport = (struct lpfc_vport *) shost->hostdata; 2688 vport->phba = phba; 2689 vport->load_flag |= FC_LOADING; 2690 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2691 vport->fc_rscn_flush = 0; 2692 2693 lpfc_get_vport_cfgparam(vport); 2694 shost->unique_id = instance; 2695 shost->max_id = LPFC_MAX_TARGET; 2696 shost->max_lun = vport->cfg_max_luns; 2697 shost->this_id = -1; 2698 shost->max_cmd_len = 16; 2699 if (phba->sli_rev == LPFC_SLI_REV4) { 2700 shost->dma_boundary = 2701 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 2702 shost->sg_tablesize = phba->cfg_sg_seg_cnt; 2703 } 2704 2705 /* 2706 * Set initial can_queue value since 0 is no longer supported and 2707 * scsi_add_host will fail. This will be adjusted later based on the 2708 * max xri value determined in hba setup. 2709 */ 2710 shost->can_queue = phba->cfg_hba_queue_depth - 10; 2711 if (dev != &phba->pcidev->dev) { 2712 shost->transportt = lpfc_vport_transport_template; 2713 vport->port_type = LPFC_NPIV_PORT; 2714 } else { 2715 shost->transportt = lpfc_transport_template; 2716 vport->port_type = LPFC_PHYSICAL_PORT; 2717 } 2718 2719 /* Initialize all internally managed lists. */ 2720 INIT_LIST_HEAD(&vport->fc_nodes); 2721 INIT_LIST_HEAD(&vport->rcv_buffer_list); 2722 spin_lock_init(&vport->work_port_lock); 2723 2724 init_timer(&vport->fc_disctmo); 2725 vport->fc_disctmo.function = lpfc_disc_timeout; 2726 vport->fc_disctmo.data = (unsigned long)vport; 2727 2728 init_timer(&vport->fc_fdmitmo); 2729 vport->fc_fdmitmo.function = lpfc_fdmi_tmo; 2730 vport->fc_fdmitmo.data = (unsigned long)vport; 2731 2732 init_timer(&vport->els_tmofunc); 2733 vport->els_tmofunc.function = lpfc_els_timeout; 2734 vport->els_tmofunc.data = (unsigned long)vport; 2735 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 2736 if (error) 2737 goto out_put_shost; 2738 2739 spin_lock_irq(&phba->hbalock); 2740 list_add_tail(&vport->listentry, &phba->port_list); 2741 spin_unlock_irq(&phba->hbalock); 2742 return vport; 2743 2744 out_put_shost: 2745 scsi_host_put(shost); 2746 out: 2747 return NULL; 2748 } 2749 2750 /** 2751 * destroy_port - destroy an FC port 2752 * @vport: pointer to an lpfc virtual N_Port data structure. 2753 * 2754 * This routine destroys a FC port from the upper layer protocol. All the 2755 * resources associated with the port are released. 2756 **/ 2757 void 2758 destroy_port(struct lpfc_vport *vport) 2759 { 2760 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2761 struct lpfc_hba *phba = vport->phba; 2762 2763 lpfc_debugfs_terminate(vport); 2764 fc_remove_host(shost); 2765 scsi_remove_host(shost); 2766 2767 spin_lock_irq(&phba->hbalock); 2768 list_del_init(&vport->listentry); 2769 spin_unlock_irq(&phba->hbalock); 2770 2771 lpfc_cleanup(vport); 2772 return; 2773 } 2774 2775 /** 2776 * lpfc_get_instance - Get a unique integer ID 2777 * 2778 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 2779 * uses the kernel idr facility to perform the task. 2780 * 2781 * Return codes: 2782 * instance - a unique integer ID allocated as the new instance. 
 * -1 - lpfc get instance failed.
 **/
int
lpfc_get_instance(void)
{
	int instance = 0;

	/* Assign an unused number */
	if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
		return -1;
	if (idr_get_new(&lpfc_hba_index, NULL, &instance))
		return -1;
	return instance;
}

/**
 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
 * @shost: pointer to SCSI host data structure.
 * @time: elapsed time of the scan in jiffies.
 *
 * This routine is called by the SCSI layer with a SCSI host to determine
 * whether the scan host is finished.
 *
 * Note: there is no scan_start function as adapter initialization will have
 * asynchronously kicked off the link initialization.
 *
 * Return codes
 *   0 - SCSI host scan is not over yet.
 *   1 - SCSI host scan is over.
 **/
int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int stat = 0;

	spin_lock_irq(shost->host_lock);

	if (vport->load_flag & FC_UNLOADING) {
		stat = 1;
		goto finished;
	}
	if (time >= 30 * HZ) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0461 Scanning longer than 30 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}
	if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0465 Link down longer than 15 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}

	if (vport->port_state != LPFC_VPORT_READY)
		goto finished;
	if (vport->num_disc_nodes || vport->fc_prli_sent)
		goto finished;
	if (vport->fc_map_cnt == 0 && time < 2 * HZ)
		goto finished;
	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
		goto finished;

	stat = 1;

finished:
	spin_unlock_irq(shost->host_lock);
	return stat;
}

/**
 * lpfc_host_attrib_init - Initialize SCSI host attributes on an FC port
 * @shost: pointer to SCSI host data structure.
 *
 * This routine initializes a given SCSI host's attributes on an FC port.
 * The SCSI host can be either on top of a physical port or a virtual port.
 **/
void lpfc_host_attrib_init(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	/*
	 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
2869 */ 2870 2871 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 2872 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 2873 fc_host_supported_classes(shost) = FC_COS_CLASS3; 2874 2875 memset(fc_host_supported_fc4s(shost), 0, 2876 sizeof(fc_host_supported_fc4s(shost))); 2877 fc_host_supported_fc4s(shost)[2] = 1; 2878 fc_host_supported_fc4s(shost)[7] = 1; 2879 2880 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 2881 sizeof fc_host_symbolic_name(shost)); 2882 2883 fc_host_supported_speeds(shost) = 0; 2884 if (phba->lmt & LMT_10Gb) 2885 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 2886 if (phba->lmt & LMT_8Gb) 2887 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 2888 if (phba->lmt & LMT_4Gb) 2889 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 2890 if (phba->lmt & LMT_2Gb) 2891 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 2892 if (phba->lmt & LMT_1Gb) 2893 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 2894 2895 fc_host_maxframe_size(shost) = 2896 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 2897 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 2898 2899 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 2900 2901 /* This value is also unchanging */ 2902 memset(fc_host_active_fc4s(shost), 0, 2903 sizeof(fc_host_active_fc4s(shost))); 2904 fc_host_active_fc4s(shost)[2] = 1; 2905 fc_host_active_fc4s(shost)[7] = 1; 2906 2907 fc_host_max_npiv_vports(shost) = phba->max_vpi; 2908 spin_lock_irq(shost->host_lock); 2909 vport->load_flag &= ~FC_LOADING; 2910 spin_unlock_irq(shost->host_lock); 2911 } 2912 2913 /** 2914 * lpfc_stop_port_s3 - Stop SLI3 device port 2915 * @phba: pointer to lpfc hba data structure. 2916 * 2917 * This routine is invoked to stop an SLI3 device port, it stops the device 2918 * from generating interrupts and stops the device driver's timers for the 2919 * device. 2920 **/ 2921 static void 2922 lpfc_stop_port_s3(struct lpfc_hba *phba) 2923 { 2924 /* Clear all interrupt enable conditions */ 2925 writel(0, phba->HCregaddr); 2926 readl(phba->HCregaddr); /* flush */ 2927 /* Clear all pending interrupts */ 2928 writel(0xffffffff, phba->HAregaddr); 2929 readl(phba->HAregaddr); /* flush */ 2930 2931 /* Reset some HBA SLI setup states */ 2932 lpfc_stop_hba_timers(phba); 2933 phba->pport->work_port_events = 0; 2934 } 2935 2936 /** 2937 * lpfc_stop_port_s4 - Stop SLI4 device port 2938 * @phba: pointer to lpfc hba data structure. 2939 * 2940 * This routine is invoked to stop an SLI4 device port, it stops the device 2941 * from generating interrupts and stops the device driver's timers for the 2942 * device. 2943 **/ 2944 static void 2945 lpfc_stop_port_s4(struct lpfc_hba *phba) 2946 { 2947 /* Reset some HBA SLI4 setup states */ 2948 lpfc_stop_hba_timers(phba); 2949 phba->pport->work_port_events = 0; 2950 phba->sli4_hba.intr_enable = 0; 2951 } 2952 2953 /** 2954 * lpfc_stop_port - Wrapper function for stopping hba port 2955 * @phba: Pointer to HBA context object. 2956 * 2957 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from 2958 * the API jump table function pointer from the lpfc_hba struct. 2959 **/ 2960 void 2961 lpfc_stop_port(struct lpfc_hba *phba) 2962 { 2963 phba->lpfc_stop_port(phba); 2964 } 2965 2966 /** 2967 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 2968 * @phba: Pointer to hba for which this call is being executed. 2969 * 2970 * This routine starts the timer waiting for the FCF rediscovery to complete. 
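 *
 * The wait period is LPFC_FCF_REDISCOVER_WAIT_TMO milliseconds; on expiry
 * lpfc_sli4_fcf_redisc_wait_tmo() runs and, unless the timer was cancelled,
 * flags an FCF rediscovery event and wakes the worker thread.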
 **/
void
lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
{
	unsigned long fcf_redisc_wait_tmo =
		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
	/* Start fcf rediscovery wait period timer */
	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
	spin_lock_irq(&phba->hbalock);
	/* Allow action to new fcf asynchronous event */
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	/* Mark the FCF rediscovery pending state */
	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
 * @ptr: holds the pointer to the lpfc hba data structure (as unsigned long).
 *
 * This routine is invoked when the wait for FCF table rediscovery has timed
 * out. If new FCF record(s) have been discovered during the wait period, an
 * FCF rediscovery event is flagged and the worker thread is woken up to
 * process it from the worker thread context.
 **/
void
lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;

	/* Don't send FCF rediscovery event if timer cancelled */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	/* Clear FCF rediscovery timer pending flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
	/* FCF rediscovery event to worker thread */
	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2776 FCF rediscover quiescent timer expired\n");
	/* wake up worker thread */
	lpfc_worker_wake_up(phba);
}

/**
 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link-attention link fault code and
 * translate it into the base driver's read link attention mailbox command
 * status.
 *
 * Return: Link-attention status in terms of base driver's coding.
 **/
static uint16_t
lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
			   struct lpfc_acqe_link *acqe_link)
{
	uint16_t latt_fault;

	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
	case LPFC_ASYNC_LINK_FAULT_NONE:
	case LPFC_ASYNC_LINK_FAULT_LOCAL:
	case LPFC_ASYNC_LINK_FAULT_REMOTE:
		latt_fault = 0;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0398 Invalid link fault code: x%x\n",
				bf_get(lpfc_acqe_link_fault, acqe_link));
		latt_fault = MBXERR_ERROR;
		break;
	}
	return latt_fault;
}

/**
 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link attention type and translate it
 * into the base driver's link attention type coding.
 *
 * Return: Link attention type in terms of base driver's coding.
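 *
 * The mapping is: DOWN and LOGICAL_DOWN yield LPFC_ATT_LINK_DOWN,
 * LOGICAL_UP yields LPFC_ATT_LINK_UP, and a physical UP (or any unknown
 * status) yields LPFC_ATT_RESERVED, which the caller ignores.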
3061 **/ 3062 static uint8_t 3063 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 3064 struct lpfc_acqe_link *acqe_link) 3065 { 3066 uint8_t att_type; 3067 3068 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 3069 case LPFC_ASYNC_LINK_STATUS_DOWN: 3070 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 3071 att_type = LPFC_ATT_LINK_DOWN; 3072 break; 3073 case LPFC_ASYNC_LINK_STATUS_UP: 3074 /* Ignore physical link up events - wait for logical link up */ 3075 att_type = LPFC_ATT_RESERVED; 3076 break; 3077 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 3078 att_type = LPFC_ATT_LINK_UP; 3079 break; 3080 default: 3081 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3082 "0399 Invalid link attention type: x%x\n", 3083 bf_get(lpfc_acqe_link_status, acqe_link)); 3084 att_type = LPFC_ATT_RESERVED; 3085 break; 3086 } 3087 return att_type; 3088 } 3089 3090 /** 3091 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed 3092 * @phba: pointer to lpfc hba data structure. 3093 * @acqe_link: pointer to the async link completion queue entry. 3094 * 3095 * This routine is to parse the SLI4 link-attention link speed and translate 3096 * it into the base driver's link-attention link speed coding. 3097 * 3098 * Return: Link-attention link speed in terms of base driver's coding. 3099 **/ 3100 static uint8_t 3101 lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba, 3102 struct lpfc_acqe_link *acqe_link) 3103 { 3104 uint8_t link_speed; 3105 3106 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) { 3107 case LPFC_ASYNC_LINK_SPEED_ZERO: 3108 case LPFC_ASYNC_LINK_SPEED_10MBPS: 3109 case LPFC_ASYNC_LINK_SPEED_100MBPS: 3110 link_speed = LPFC_LINK_SPEED_UNKNOWN; 3111 break; 3112 case LPFC_ASYNC_LINK_SPEED_1GBPS: 3113 link_speed = LPFC_LINK_SPEED_1GHZ; 3114 break; 3115 case LPFC_ASYNC_LINK_SPEED_10GBPS: 3116 link_speed = LPFC_LINK_SPEED_10GHZ; 3117 break; 3118 default: 3119 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3120 "0483 Invalid link-attention link speed: x%x\n", 3121 bf_get(lpfc_acqe_link_speed, acqe_link)); 3122 link_speed = LPFC_LINK_SPEED_UNKNOWN; 3123 break; 3124 } 3125 return link_speed; 3126 } 3127 3128 /** 3129 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 3130 * @phba: pointer to lpfc hba data structure. 3131 * @acqe_link: pointer to the async link completion queue entry. 3132 * 3133 * This routine is to handle the SLI4 asynchronous FCoE link event. 
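 *
 * In FC mode the routine issues a real READ_TOPOLOGY mailbox command to
 * fetch the topology; in FCoE mode it fills in the READ_TOPOLOGY fields
 * from the ACQE and invokes the completion handler directly, without
 * sending the command to the port.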
 **/
static void
lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_link *acqe_link)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	uint8_t att_type;
	int rc;

	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
	if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
		return;
	phba->fcoe_eventtag = acqe_link->event_tag;
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0395 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0396 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0397 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we are done processing the link event */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
				bf_get(lpfc_acqe_link_speed, acqe_link);
	phba->sli4_hba.link_state.duplex =
				bf_get(lpfc_acqe_link_duplex, acqe_link);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_link_status, acqe_link);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_link_type, acqe_link);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_link_number, acqe_link);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_link);
	phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_logical_link_speed, acqe_link);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2900 Async FCoE Link event - Speed:%dGBit duplex:x%x "
			"LA Type:x%x Port Type:%d Port Number:%d Logical "
			"speed:%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.duplex,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed * 10,
			phba->sli4_hba.link_state.fault);
	/*
	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
	 * topology info. Note: Optional for non FC-AL ports.
	 */
	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED)
			goto out_free_dmabuf;
		return;
	}
	/*
	 * For FCoE Mode: fill in all the topology information we need and call
	 * the READ_TOPOLOGY completion routine to continue without actually
	 * sending the READ_TOPOLOGY mailbox command to the port.
	 */
	/* Parse and translate status field */
	mb = &pmb->u.mb;
	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);

	/* Parse and translate link attention fields */
	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
	la->eventTag = acqe_link->event_tag;
	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
	bf_set(lpfc_mbx_read_top_link_spd, la,
	       lpfc_sli4_parse_latt_link_speed(phba, acqe_link));

	/* Fake the following irrelevant fields */
	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
	bf_set(lpfc_mbx_read_top_il, la, 0);
	bf_set(lpfc_mbx_read_top_pb, la, 0);
	bf_set(lpfc_mbx_read_top_fa, la, 0);
	bf_set(lpfc_mbx_read_top_mm, la, 0);

	/* Invoke the lpfc_handle_latt mailbox command callback function */
	lpfc_mbx_cmpl_read_topology(phba, pmb);

	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fc: pointer to the async fc completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FC event. It will simply
 * log that the event was received and then issue a read_topology mailbox
 * command so that the rest of the driver will treat it the same as SLI3.
 **/
static void
lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	int rc;

	if (bf_get(lpfc_trailer_type, acqe_fc) !=
	    LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2895 Non-FC link event detected (%d)\n",
				bf_get(lpfc_trailer_type, acqe_fc));
		return;
	}
	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc);
	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
	phba->sli4_hba.link_state.topology =
				bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_fc);
	phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2896 Async FC event - Speed:%dGBaud Topology:x%x "
			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
			"%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed * 10,
			phba->sli4_hba.link_state.fault);
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2897 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2898 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2899 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we are done processing the link event */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		goto out_free_dmabuf;
	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_sli: pointer to the async SLI completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous SLI events.
 **/
static void
lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2901 Async SLI event - Event Data1:x%08x Event Data2:"
			"x%08x SLI Event Type:%d\n",
			acqe_sli->event_data1, acqe_sli->event_data2,
			bf_get(lpfc_trailer_type, acqe_sli));
	return;
}

/**
 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
 * @vport: pointer to vport data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on a vport in
 * response to a CVL event.
 *
 * Return the pointer to the ndlp with the vport if successful, otherwise
 * return NULL.
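 *
 * As a side effect, this routine brings the port link down, cleans up the
 * vport's pending mailbox commands, and sets FC_VPORT_CVL_RCVD in
 * vport->fc_flag so that later CVL handling can tell which vports have
 * already received a CVL.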
 **/
static struct lpfc_nodelist *
lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;

	if (!vport)
		return NULL;
	phba = vport->phba;
	if (!phba)
		return NULL;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(vport, ndlp, Fabric_DID);
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return NULL;
	}
	if ((phba->pport->port_state < LPFC_FLOGI) &&
	    (phba->pport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	/* If virtual link is not yet instantiated ignore CVL */
	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
	    && (vport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	shost = lpfc_shost_from_vport(vport);
	if (!shost)
		return NULL;
	lpfc_linkdown_port(vport);
	lpfc_cleanup_pending_mbox(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_VPORT_CVL_RCVD;
	spin_unlock_irq(shost->host_lock);

	return ndlp;
}

/**
 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on all vports in
 * response to an FCF dead event.
 **/
static void
lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_sli4_perform_vport_cvl(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fip: pointer to the async FCoE FIP completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FCoE FIP event.
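 *
 * The FIP event types handled here are NEW_FCF, FCF_PARAM_MOD,
 * FCF_TABLE_FULL, FCF_DEAD and CVL; any other event type is logged as
 * unknown.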
3450 **/ 3451 static void 3452 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, 3453 struct lpfc_acqe_fip *acqe_fip) 3454 { 3455 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); 3456 int rc; 3457 struct lpfc_vport *vport; 3458 struct lpfc_nodelist *ndlp; 3459 struct Scsi_Host *shost; 3460 int active_vlink_present; 3461 struct lpfc_vport **vports; 3462 int i; 3463 3464 phba->fc_eventTag = acqe_fip->event_tag; 3465 phba->fcoe_eventtag = acqe_fip->event_tag; 3466 switch (event_type) { 3467 case LPFC_FIP_EVENT_TYPE_NEW_FCF: 3468 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: 3469 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) 3470 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 3471 LOG_DISCOVERY, 3472 "2546 New FCF event, evt_tag:x%x, " 3473 "index:x%x\n", 3474 acqe_fip->event_tag, 3475 acqe_fip->index); 3476 else 3477 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 3478 LOG_DISCOVERY, 3479 "2788 FCF param modified event, " 3480 "evt_tag:x%x, index:x%x\n", 3481 acqe_fip->event_tag, 3482 acqe_fip->index); 3483 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3484 /* 3485 * During period of FCF discovery, read the FCF 3486 * table record indexed by the event to update 3487 * FCF roundrobin failover eligible FCF bmask. 3488 */ 3489 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 3490 LOG_DISCOVERY, 3491 "2779 Read FCF (x%x) for updating " 3492 "roundrobin FCF failover bmask\n", 3493 acqe_fip->index); 3494 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); 3495 } 3496 3497 /* If the FCF discovery is in progress, do nothing. */ 3498 spin_lock_irq(&phba->hbalock); 3499 if (phba->hba_flag & FCF_TS_INPROG) { 3500 spin_unlock_irq(&phba->hbalock); 3501 break; 3502 } 3503 /* If fast FCF failover rescan event is pending, do nothing */ 3504 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { 3505 spin_unlock_irq(&phba->hbalock); 3506 break; 3507 } 3508 3509 /* If the FCF has been in discovered state, do nothing. */ 3510 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 3511 spin_unlock_irq(&phba->hbalock); 3512 break; 3513 } 3514 spin_unlock_irq(&phba->hbalock); 3515 3516 /* Otherwise, scan the entire FCF table and re-discover SAN */ 3517 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3518 "2770 Start FCF table scan per async FCF " 3519 "event, evt_tag:x%x, index:x%x\n", 3520 acqe_fip->event_tag, acqe_fip->index); 3521 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 3522 LPFC_FCOE_FCF_GET_FIRST); 3523 if (rc) 3524 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3525 "2547 Issue FCF scan read FCF mailbox " 3526 "command failed (x%x)\n", rc); 3527 break; 3528 3529 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: 3530 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3531 "2548 FCF Table full count 0x%x tag 0x%x\n", 3532 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), 3533 acqe_fip->event_tag); 3534 break; 3535 3536 case LPFC_FIP_EVENT_TYPE_FCF_DEAD: 3537 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3538 "2549 FCF (x%x) disconnected from network, " 3539 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag); 3540 /* 3541 * If we are in the middle of FCF failover process, clear 3542 * the corresponding FCF bit in the roundrobin bitmap. 
		 */
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			spin_unlock_irq(&phba->hbalock);
			/* Update FLOGI FCF failover eligible FCF bmask */
			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* If the event is not for the currently used FCF, do nothing */
		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
			break;

		/*
		 * Otherwise, request the port to rediscover the entire FCF
		 * table for a fast recovery in case the current FCF is no
		 * longer valid, since we are not already in the middle of
		 * the FCF failover process.
		 */
		spin_lock_irq(&phba->hbalock);
		/* Mark the fast failover process in progress */
		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
		spin_unlock_irq(&phba->hbalock);

		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2771 Start FCF fast failover process due to "
				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
				"\n", acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_redisc_fcf_table(phba);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2772 Issue FCF rediscover mailbox "
					"command failed, fail through to FCF "
					"dead event\n");
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * Last resort will fail over by treating this
			 * as a link down to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		} else {
			/* Reset FCF roundrobin bmask for new discovery */
			memset(phba->fcf.fcf_rr_bmask, 0,
			       sizeof(*phba->fcf.fcf_rr_bmask));
			/*
			 * Handling fast FCF failover to a DEAD FCF event is
			 * considered equivalent to receiving CVL to all vports.
			 */
			lpfc_sli4_perform_all_vport_cvl(phba);
		}
		break;
	case LPFC_FIP_EVENT_TYPE_CVL:
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
			"2718 Clear Virtual Link Received for VPI 0x%x"
			" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
		vport = lpfc_find_vport_by_vpid(phba,
				acqe_fip->index - phba->vpi_base);
		ndlp = lpfc_sli4_perform_vport_cvl(vport);
		if (!ndlp)
			break;
		active_vlink_present = 0;

		vports = lpfc_create_vport_work_array(phba);
		if (vports) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				if ((!(vports[i]->fc_flag &
					FC_VPORT_CVL_RCVD)) &&
					(vports[i]->port_state > LPFC_FDISC)) {
					active_vlink_present = 1;
					break;
				}
			}
			lpfc_destroy_vport_work_array(phba, vports);
		}

		if (active_vlink_present) {
			/*
			 * If there are other active VLinks present,
			 * re-instantiate the Vlink using FDISC.
			 */
			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
			shost = lpfc_shost_from_vport(vport);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(shost->host_lock);
			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
			vport->port_state = LPFC_FDISC;
		} else {
			/*
			 * Otherwise, we request port to rediscover
			 * the entire FCF table for a fast recovery
			 * from possible case that the current FCF
			 * is no longer valid if we are not already
			 * in the FCF failover process.
3642 */
3643 spin_lock_irq(&phba->hbalock);
3644 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3645 spin_unlock_irq(&phba->hbalock);
3646 break;
3647 }
3648 /* Mark the fast failover process in progress */
3649 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
3650 spin_unlock_irq(&phba->hbalock);
3651 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3652 LOG_DISCOVERY,
3653 "2773 Start FCF failover per CVL, "
3654 "evt_tag:x%x\n", acqe_fip->event_tag);
3655 rc = lpfc_sli4_redisc_fcf_table(phba);
3656 if (rc) {
3657 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3658 LOG_DISCOVERY,
3659 "2774 Issue FCF rediscover "
3660 "mailbox command failed, fail "
3661 "through to CVL event\n");
3662 spin_lock_irq(&phba->hbalock);
3663 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
3664 spin_unlock_irq(&phba->hbalock);
3665 /*
3666 * Last resort will be re-try on
3667 * the current registered FCF entry.
3668 */
3669 lpfc_retry_pport_discovery(phba);
3670 } else
3671 /*
3672 * Reset FCF roundrobin bmask for new
3673 * discovery.
3674 */
3675 memset(phba->fcf.fcf_rr_bmask, 0,
3676 sizeof(*phba->fcf.fcf_rr_bmask));
3677 }
3678 break;
3679 default:
3680 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3681 "0288 Unknown FCoE event type 0x%x event tag "
3682 "0x%x\n", event_type, acqe_fip->event_tag);
3683 break;
3684 }
3685 }
3686
3687 /**
3688 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
3689 * @phba: pointer to lpfc hba data structure.
3690 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
3691 *
3692 * This routine is to handle the SLI4 asynchronous dcbx event.
3693 **/
3694 static void
3695 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
3696 struct lpfc_acqe_dcbx *acqe_dcbx)
3697 {
3698 phba->fc_eventTag = acqe_dcbx->event_tag;
3699 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3700 "0290 The SLI4 DCBX asynchronous event is not "
3701 "handled yet\n");
3702 }
3703
3704 /**
3705 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
3706 * @phba: pointer to lpfc hba data structure.
3707 * @acqe_grp5: pointer to the async grp5 completion queue entry.
3708 *
3709 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
3710 * is an asynchronous notification of a logical link speed change. The Port
3711 * reports the logical link speed in units of 10Mbps.
3712 **/
3713 static void
3714 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
3715 struct lpfc_acqe_grp5 *acqe_grp5)
3716 {
3717 uint16_t prev_ll_spd;
3718
3719 phba->fc_eventTag = acqe_grp5->event_tag;
3720 phba->fcoe_eventtag = acqe_grp5->event_tag;
3721 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
3722 phba->sli4_hba.link_state.logical_speed =
3723 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5));
3724 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3725 "2789 GRP5 Async Event: Updating logical link speed "
3726 "from %dMbps to %dMbps\n", (prev_ll_spd * 10),
3727 (phba->sli4_hba.link_state.logical_speed*10));
3728 }
3729
3730 /**
3731 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
3732 * @phba: pointer to lpfc hba data structure.
3733 *
3734 * This routine is invoked by the worker thread to process all the pending
3735 * SLI4 asynchronous events.
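 * Each queued event is removed from sp_asynce_work_queue under hbalock
 * and dispatched on the trailer code in its completion entry (link,
 * FCoE/FIP, DCBX, GRP5, FC, or SLI); the CQ event is returned to the
 * free pool whether or not the code was recognized.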
3736 **/
3737 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3738 {
3739 struct lpfc_cq_event *cq_event;
3740
3741 /* First, declare the async event has been handled */
3742 spin_lock_irq(&phba->hbalock);
3743 phba->hba_flag &= ~ASYNC_EVENT;
3744 spin_unlock_irq(&phba->hbalock);
3745 /* Now, handle all the async events */
3746 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
3747 /* Get the first event from the head of the event queue */
3748 spin_lock_irq(&phba->hbalock);
3749 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
3750 cq_event, struct lpfc_cq_event, list);
3751 spin_unlock_irq(&phba->hbalock);
3752 /* Process the asynchronous event */
3753 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
3754 case LPFC_TRAILER_CODE_LINK:
3755 lpfc_sli4_async_link_evt(phba,
3756 &cq_event->cqe.acqe_link);
3757 break;
3758 case LPFC_TRAILER_CODE_FCOE:
3759 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
3760 break;
3761 case LPFC_TRAILER_CODE_DCBX:
3762 lpfc_sli4_async_dcbx_evt(phba,
3763 &cq_event->cqe.acqe_dcbx);
3764 break;
3765 case LPFC_TRAILER_CODE_GRP5:
3766 lpfc_sli4_async_grp5_evt(phba,
3767 &cq_event->cqe.acqe_grp5);
3768 break;
3769 case LPFC_TRAILER_CODE_FC:
3770 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
3771 break;
3772 case LPFC_TRAILER_CODE_SLI:
3773 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
3774 break;
3775 default:
3776 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3777 "1804 Invalid asynchronous event code: "
3778 "x%x\n", bf_get(lpfc_trailer_code,
3779 &cq_event->cqe.mcqe_cmpl));
3780 break;
3781 }
3782 /* Free the completion event processed to the free pool */
3783 lpfc_sli4_cq_event_release(phba, cq_event);
3784 }
3785 }
3786
3787 /**
3788 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
3789 * @phba: pointer to lpfc hba data structure.
3790 *
3791 * This routine is invoked by the worker thread to process FCF table
3792 * rediscovery pending completion event.
3793 **/
3794 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
3795 {
3796 int rc;
3797
3798 spin_lock_irq(&phba->hbalock);
3799 /* Clear FCF rediscovery timeout event */
3800 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
3801 /* Clear driver fast failover FCF record flag */
3802 phba->fcf.failover_rec.flag = 0;
3803 /* Set state for FCF fast failover */
3804 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
3805 spin_unlock_irq(&phba->hbalock);
3806
3807 /* Scan FCF table from the first entry to re-discover SAN */
3808 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3809 "2777 Start post-quiescent FCF table scan\n");
3810 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
3811 if (rc)
3812 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3813 "2747 Issue FCF scan read FCF mailbox "
3814 "command failed 0x%x\n", rc);
3815 }
3816
3817 /**
3818 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3819 * @phba: pointer to lpfc hba data structure.
3820 * @dev_grp: The HBA PCI-Device group number.
3821 *
3822 * This routine is invoked to set up the per HBA PCI-Device group function
3823 * API jump table entries.
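 * Four jump tables are set up in order: device INIT, SCSI, SLI, and
 * MBOX; a failure in any one of them fails the whole setup with
 * -ENODEV.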
3824 *
3825 * Return: 0 if success, otherwise -ENODEV
3826 **/
3827 int
3828 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3829 {
3830 int rc;
3831
3832 /* Set up lpfc PCI-device group */
3833 phba->pci_dev_grp = dev_grp;
3834
3835 /* The LPFC_PCI_DEV_OC uses SLI4 */
3836 if (dev_grp == LPFC_PCI_DEV_OC)
3837 phba->sli_rev = LPFC_SLI_REV4;
3838
3839 /* Set up device INIT API function jump table */
3840 rc = lpfc_init_api_table_setup(phba, dev_grp);
3841 if (rc)
3842 return -ENODEV;
3843 /* Set up SCSI API function jump table */
3844 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3845 if (rc)
3846 return -ENODEV;
3847 /* Set up SLI API function jump table */
3848 rc = lpfc_sli_api_table_setup(phba, dev_grp);
3849 if (rc)
3850 return -ENODEV;
3851 /* Set up MBOX API function jump table */
3852 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3853 if (rc)
3854 return -ENODEV;
3855
3856 return 0;
3857 }
3858
3859 /**
3860 * lpfc_log_intr_mode - Log the active interrupt mode
3861 * @phba: pointer to lpfc hba data structure.
3862 * @intr_mode: active interrupt mode adopted.
3863 *
3864 * This routine is invoked to log the currently used active interrupt mode
3865 * to the device.
3866 **/
3867 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3868 {
3869 switch (intr_mode) {
3870 case 0:
3871 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3872 "0470 Enable INTx interrupt mode.\n");
3873 break;
3874 case 1:
3875 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3876 "0481 Enabled MSI interrupt mode.\n");
3877 break;
3878 case 2:
3879 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3880 "0480 Enabled MSI-X interrupt mode.\n");
3881 break;
3882 default:
3883 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3884 "0482 Illegal interrupt mode.\n");
3885 break;
3886 }
3887 return;
3888 }
3889
3890 /**
3891 * lpfc_enable_pci_dev - Enable a generic PCI device.
3892 * @phba: pointer to lpfc hba data structure.
3893 *
3894 * This routine is invoked to enable the PCI device that is common to all
3895 * PCI devices.
3896 *
3897 * Return codes
3898 * 0 - successful
3899 * other values - error
3900 **/
3901 static int
3902 lpfc_enable_pci_dev(struct lpfc_hba *phba)
3903 {
3904 struct pci_dev *pdev;
3905 int bars;
3906
3907 /* Obtain PCI device reference */
3908 if (!phba->pcidev)
3909 goto out_error;
3910 else
3911 pdev = phba->pcidev;
3912 /* Select PCI BARs */
3913 bars = pci_select_bars(pdev, IORESOURCE_MEM);
3914 /* Enable PCI device */
3915 if (pci_enable_device_mem(pdev))
3916 goto out_error;
3917 /* Request PCI resource for the device */
3918 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3919 goto out_disable_device;
3920 /* Set up device as PCI master and save state for EEH */
3921 pci_set_master(pdev);
3922 pci_try_set_mwi(pdev);
3923 pci_save_state(pdev);
3924
3925 return 0;
3926
3927 out_disable_device:
3928 pci_disable_device(pdev);
3929 out_error:
3930 return -ENODEV;
3931 }
3932
3933 /**
3934 * lpfc_disable_pci_dev - Disable a generic PCI device.
3935 * @phba: pointer to lpfc hba data structure.
3936 *
3937 * This routine is invoked to disable the PCI device that is common to all
3938 * PCI devices.
3939 **/
3940 static void
3941 lpfc_disable_pci_dev(struct lpfc_hba *phba)
3942 {
3943 struct pci_dev *pdev;
3944 int bars;
3945
3946 /* Obtain PCI device reference */
3947 if (!phba->pcidev)
3948 return;
3949 else
3950 pdev = phba->pcidev;
3951 /* Select PCI BARs */
3952 bars = pci_select_bars(pdev, IORESOURCE_MEM);
3953 /* Release PCI resource and disable PCI device */
3954 pci_release_selected_regions(pdev, bars);
3955 pci_disable_device(pdev);
3956 /* Null out PCI private reference to driver */
3957 pci_set_drvdata(pdev, NULL);
3958
3959 return;
3960 }
3961
3962 /**
3963 * lpfc_reset_hba - Reset a hba
3964 * @phba: pointer to lpfc hba data structure.
3965 *
3966 * This routine is invoked to reset a hba device. It brings the HBA
3967 * offline, performs a board restart, and then brings the board back
3968 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
3969 * any outstanding mailbox commands.
3970 **/
3971 void
3972 lpfc_reset_hba(struct lpfc_hba *phba)
3973 {
3974 /* If resets are disabled then set error state and return. */
3975 if (!phba->cfg_enable_hba_reset) {
3976 phba->link_state = LPFC_HBA_ERROR;
3977 return;
3978 }
3979 lpfc_offline_prep(phba);
3980 lpfc_offline(phba);
3981 lpfc_sli_brdrestart(phba);
3982 lpfc_online(phba);
3983 lpfc_unblock_mgmt_io(phba);
3984 }
3985
3986 /**
3987 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
3988 * @phba: pointer to lpfc hba data structure.
3989 *
3990 * This routine is invoked to set up the driver internal resources specific to
3991 * support the SLI-3 HBA device it is attached to.
3992 *
3993 * Return codes
3994 * 0 - successful
3995 * other values - error
3996 **/
3997 static int
3998 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
3999 {
4000 struct lpfc_sli *psli;
4001
4002 /*
4003 * Initialize timers used by driver
4004 */
4005
4006 /* Heartbeat timer */
4007 init_timer(&phba->hb_tmofunc);
4008 phba->hb_tmofunc.function = lpfc_hb_timeout;
4009 phba->hb_tmofunc.data = (unsigned long)phba;
4010
4011 psli = &phba->sli;
4012 /* MBOX heartbeat timer */
4013 init_timer(&psli->mbox_tmo);
4014 psli->mbox_tmo.function = lpfc_mbox_timeout;
4015 psli->mbox_tmo.data = (unsigned long) phba;
4016 /* FCP polling mode timer */
4017 init_timer(&phba->fcp_poll_timer);
4018 phba->fcp_poll_timer.function = lpfc_poll_timeout;
4019 phba->fcp_poll_timer.data = (unsigned long) phba;
4020 /* Fabric block timer */
4021 init_timer(&phba->fabric_block_timer);
4022 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4023 phba->fabric_block_timer.data = (unsigned long) phba;
4024 /* EA polling mode timer */
4025 init_timer(&phba->eratt_poll);
4026 phba->eratt_poll.function = lpfc_poll_eratt;
4027 phba->eratt_poll.data = (unsigned long) phba;
4028
4029 /* Host attention work mask setup */
4030 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
4031 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
4032
4033 /* Get all the module params for configuring this host */
4034 lpfc_get_cfgparam(phba);
4035 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
4036 phba->menlo_flag |= HBA_MENLO_SUPPORT;
4037 /* check for menlo minimum sg count */
4038 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
4039 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
4040 }
4041
4042 /*
4043 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
4044 * used to create the sg_dma_buf_pool must be dynamically calculated.
4045 * 2 segments are added since the IOCB needs a command and response bde.
4046 */
4047 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4048 sizeof(struct fcp_rsp) +
4049 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
4050
4051 if (phba->cfg_enable_bg) {
4052 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
4053 phba->cfg_sg_dma_buf_size +=
4054 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
4055 }
4056
4057 /* Also reinitialize the host templates with new values. */
4058 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4059 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4060
4061 phba->max_vpi = LPFC_MAX_VPI;
4062 /* This will be set to correct value after config_port mbox */
4063 phba->max_vports = 0;
4064
4065 /*
4066 * Initialize the SLI Layer to run with lpfc HBAs.
4067 */
4068 lpfc_sli_setup(phba);
4069 lpfc_sli_queue_setup(phba);
4070
4071 /* Allocate device driver memory */
4072 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
4073 return -ENOMEM;
4074
4075 return 0;
4076 }
4077
4078 /**
4079 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
4080 * @phba: pointer to lpfc hba data structure.
4081 *
4082 * This routine is invoked to unset the driver internal resources set up
4083 * specific for supporting the SLI-3 HBA device it is attached to.
4084 **/
4085 static void
4086 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
4087 {
4088 /* Free device driver memory allocated */
4089 lpfc_mem_free_all(phba);
4090
4091 return;
4092 }
4093
4094 /**
4095 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
4096 * @phba: pointer to lpfc hba data structure.
4097 *
4098 * This routine is invoked to set up the driver internal resources specific to
4099 * support the SLI-4 HBA device it is attached to.
4100 *
4101 * Return codes
4102 * 0 - successful
4103 * other values - error
4104 **/
4105 static int
4106 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4107 {
4108 struct lpfc_sli *psli;
4109 LPFC_MBOXQ_t *mboxq;
4110 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
4111 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
4112 struct lpfc_mqe *mqe;
4113 int longs, sli_family;
4114
4115 /* Before proceeding, wait for POST done and device ready */
4116 rc = lpfc_sli4_post_status_check(phba);
4117 if (rc)
4118 return -ENODEV;
4119
4120 /*
4121 * Initialize timers used by driver
4122 */
4123
4124 /* Heartbeat timer */
4125 init_timer(&phba->hb_tmofunc);
4126 phba->hb_tmofunc.function = lpfc_hb_timeout;
4127 phba->hb_tmofunc.data = (unsigned long)phba;
4128 init_timer(&phba->rrq_tmr);
4129 phba->rrq_tmr.function = lpfc_rrq_timeout;
4130 phba->rrq_tmr.data = (unsigned long)phba;
4131
4132 psli = &phba->sli;
4133 /* MBOX heartbeat timer */
4134 init_timer(&psli->mbox_tmo);
4135 psli->mbox_tmo.function = lpfc_mbox_timeout;
4136 psli->mbox_tmo.data = (unsigned long) phba;
4137 /* Fabric block timer */
4138 init_timer(&phba->fabric_block_timer);
4139 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4140 phba->fabric_block_timer.data = (unsigned long) phba;
4141 /* EA polling mode timer */
4142 init_timer(&phba->eratt_poll);
4143 phba->eratt_poll.function = lpfc_poll_eratt;
4144 phba->eratt_poll.data = (unsigned long) phba;
4145 /* FCF rediscover timer */
4146 init_timer(&phba->fcf.redisc_wait);
4147 phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
4148 phba->fcf.redisc_wait.data = (unsigned long)phba;
4149
4150 /*
4151 * We need to do a READ_CONFIG mailbox command here before
4152 * calling lpfc_get_cfgparam. For VFs this will report the
4153 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
4154 * All of the resources allocated
4155 * for this Port are tied to these values.
4156 */
4157 /* Get all the module params for configuring this host */
4158 lpfc_get_cfgparam(phba);
4159 phba->max_vpi = LPFC_MAX_VPI;
4160 /* This will be set to correct value after the read_config mbox */
4161 phba->max_vports = 0;
4162
4163 /* Program the default value of vlan_id and fc_map */
4164 phba->valid_vlan = 0;
4165 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4166 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4167 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4168
4169 /*
4170 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
4171 * used to create the sg_dma_buf_pool must be dynamically calculated.
4172 * 2 segments are added since the IOCB needs a command and response bde.
4173 * To ensure that the scsi sgl does not cross a 4k page boundary, only
4174 * sgl sizes that are a power of 2 are used.
4175 */
4176 buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
4177 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
4178
4179 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
4180 max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
4181 switch (sli_family) {
4182 case LPFC_SLI_INTF_FAMILY_BE2:
4183 case LPFC_SLI_INTF_FAMILY_BE3:
4184 /* There is a single hint for BE - 2 pages per BPL.
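 * When SLI_HINT1_1 is set the cap is raised to
 * LPFC_SLI4_FL1_MAX_BUF_SIZE. The loop that follows the switch then
 * doubles dma_buf_size from LPFC_SLI4_MIN_BUF_SIZE until it covers
 * buf_size (or hits the cap), so the pool buffer size stays a power
 * of 2 and a scsi sgl never straddles a 4K boundary. Illustrative
 * example (exact sizes depend on the structure definitions): with
 * cfg_sg_seg_cnt = 64, buf_size is 66 sges plus the FCP command and
 * response, and dma_buf_size rounds that up to the next power of 2.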
 */
4185 if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) ==
4186 LPFC_SLI_INTF_SLI_HINT1_1)
4187 max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
4188 break;
4189 case LPFC_SLI_INTF_FAMILY_LNCR_A0:
4190 case LPFC_SLI_INTF_FAMILY_LNCR_B0:
4191 default:
4192 break;
4193 }
4194 for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
4195 dma_buf_size < max_buf_size && buf_size > dma_buf_size;
4196 dma_buf_size = dma_buf_size << 1)
4197 ;
4198 if (dma_buf_size == max_buf_size)
4199 phba->cfg_sg_seg_cnt = (dma_buf_size -
4200 sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
4201 (2 * sizeof(struct sli4_sge))) /
4202 sizeof(struct sli4_sge);
4203 phba->cfg_sg_dma_buf_size = dma_buf_size;
4204
4205 /* Initialize buffer queue management fields */
4206 hbq_count = lpfc_sli_hbq_count();
4207 for (i = 0; i < hbq_count; ++i)
4208 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4209 INIT_LIST_HEAD(&phba->rb_pend_list);
4210 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
4211 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
4212
4213 /*
4214 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
4215 */
4216 /* Initialize the Abort scsi buffer list used by driver */
4217 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
4218 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
4219 /* This abort list is used by the worker thread */
4220 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
4221
4222 /*
4223 * Initialize driver internal slow-path work queues
4224 */
4225
4226 /* Driver internal slow-path CQ Event pool */
4227 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
4228 /* Response IOCB work queue list */
4229 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
4230 /* Asynchronous event CQ Event work queue list */
4231 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
4232 /* Fast-path XRI aborted CQ Event work queue list */
4233 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
4234 /* Slow-path XRI aborted CQ Event work queue list */
4235 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
4236 /* Receive queue CQ Event work queue list */
4237 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
4238
4239 /* Initialize the driver internal SLI layer lists. */
4240 lpfc_sli_setup(phba);
4241 lpfc_sli_queue_setup(phba);
4242
4243 /* Allocate device driver memory */
4244 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
4245 if (rc)
4246 return -ENOMEM;
4247
4248 /* IF Type 2 ports get initialized now. */
4249 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
4250 LPFC_SLI_INTF_IF_TYPE_2) {
4251 rc = lpfc_pci_function_reset(phba);
4252 if (unlikely(rc))
4253 return -ENODEV;
4254 }
4255
4256 /* Create the bootstrap mailbox command */
4257 rc = lpfc_create_bootstrap_mbox(phba);
4258 if (unlikely(rc))
4259 goto out_free_mem;
4260
4261 /* Set up the host's endian order with the device. */
4262 rc = lpfc_setup_endian_order(phba);
4263 if (unlikely(rc))
4264 goto out_free_bsmbx;
4265
4266 /* Set up the hba's configuration parameters. */
4267 rc = lpfc_sli4_read_config(phba);
4268 if (unlikely(rc))
4269 goto out_free_bsmbx;
4270
4271 /* IF Type 0 ports get initialized now.
*/ 4272 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 4273 LPFC_SLI_INTF_IF_TYPE_0) { 4274 rc = lpfc_pci_function_reset(phba); 4275 if (unlikely(rc)) 4276 goto out_free_bsmbx; 4277 } 4278 4279 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 4280 GFP_KERNEL); 4281 if (!mboxq) { 4282 rc = -ENOMEM; 4283 goto out_free_bsmbx; 4284 } 4285 4286 /* Get the Supported Pages. It is always available. */ 4287 lpfc_supported_pages(mboxq); 4288 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4289 if (unlikely(rc)) { 4290 rc = -EIO; 4291 mempool_free(mboxq, phba->mbox_mem_pool); 4292 goto out_free_bsmbx; 4293 } 4294 4295 mqe = &mboxq->u.mqe; 4296 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), 4297 LPFC_MAX_SUPPORTED_PAGES); 4298 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { 4299 switch (pn_page[i]) { 4300 case LPFC_SLI4_PARAMETERS: 4301 phba->sli4_hba.pc_sli4_params.supported = 1; 4302 break; 4303 default: 4304 break; 4305 } 4306 } 4307 4308 /* Read the port's SLI4 Parameters capabilities if supported. */ 4309 if (phba->sli4_hba.pc_sli4_params.supported) 4310 rc = lpfc_pc_sli4_params_get(phba, mboxq); 4311 mempool_free(mboxq, phba->mbox_mem_pool); 4312 if (rc) { 4313 rc = -EIO; 4314 goto out_free_bsmbx; 4315 } 4316 /* Create all the SLI4 queues */ 4317 rc = lpfc_sli4_queue_create(phba); 4318 if (rc) 4319 goto out_free_bsmbx; 4320 4321 /* Create driver internal CQE event pool */ 4322 rc = lpfc_sli4_cq_event_pool_create(phba); 4323 if (rc) 4324 goto out_destroy_queue; 4325 4326 /* Initialize and populate the iocb list per host */ 4327 rc = lpfc_init_sgl_list(phba); 4328 if (rc) { 4329 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4330 "1400 Failed to initialize sgl list.\n"); 4331 goto out_destroy_cq_event_pool; 4332 } 4333 rc = lpfc_init_active_sgl_array(phba); 4334 if (rc) { 4335 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4336 "1430 Failed to initialize sgl list.\n"); 4337 goto out_free_sgl_list; 4338 } 4339 4340 rc = lpfc_sli4_init_rpi_hdrs(phba); 4341 if (rc) { 4342 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4343 "1432 Failed to initialize rpi headers.\n"); 4344 goto out_free_active_sgl; 4345 } 4346 4347 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ 4348 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 4349 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long), 4350 GFP_KERNEL); 4351 if (!phba->fcf.fcf_rr_bmask) { 4352 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4353 "2759 Failed allocate memory for FCF round " 4354 "robin failover bmask\n"); 4355 goto out_remove_rpi_hdrs; 4356 } 4357 4358 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 4359 phba->cfg_fcp_eq_count), GFP_KERNEL); 4360 if (!phba->sli4_hba.fcp_eq_hdl) { 4361 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4362 "2572 Failed allocate memory for fast-path " 4363 "per-EQ handle array\n"); 4364 goto out_free_fcf_rr_bmask; 4365 } 4366 4367 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * 4368 phba->sli4_hba.cfg_eqn), GFP_KERNEL); 4369 if (!phba->sli4_hba.msix_entries) { 4370 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4371 "2573 Failed allocate memory for msi-x " 4372 "interrupt vector entries\n"); 4373 goto out_free_fcp_eq_hdl; 4374 } 4375 4376 return rc; 4377 4378 out_free_fcp_eq_hdl: 4379 kfree(phba->sli4_hba.fcp_eq_hdl); 4380 out_free_fcf_rr_bmask: 4381 kfree(phba->fcf.fcf_rr_bmask); 4382 out_remove_rpi_hdrs: 4383 lpfc_sli4_remove_rpi_hdrs(phba); 4384 out_free_active_sgl: 4385 lpfc_free_active_sgl(phba); 4386 
out_free_sgl_list:
4387 lpfc_free_sgl_list(phba);
4388 out_destroy_cq_event_pool:
4389 lpfc_sli4_cq_event_pool_destroy(phba);
4390 out_destroy_queue:
4391 lpfc_sli4_queue_destroy(phba);
4392 out_free_bsmbx:
4393 lpfc_destroy_bootstrap_mbox(phba);
4394 out_free_mem:
4395 lpfc_mem_free(phba);
4396 return rc;
4397 }
4398
4399 /**
4400 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
4401 * @phba: pointer to lpfc hba data structure.
4402 *
4403 * This routine is invoked to unset the driver internal resources set up
4404 * specific for supporting the SLI-4 HBA device it is attached to.
4405 **/
4406 static void
4407 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4408 {
4409 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4410
4411 /* Free memory allocated for msi-x interrupt vector entries */
4412 kfree(phba->sli4_hba.msix_entries);
4413
4414 /* Free memory allocated for fast-path work queue handles */
4415 kfree(phba->sli4_hba.fcp_eq_hdl);
4416
4417 /* Free the allocated rpi headers. */
4418 lpfc_sli4_remove_rpi_hdrs(phba);
4419 lpfc_sli4_remove_rpis(phba);
4420
4421 /* Free eligible FCF index bmask */
4422 kfree(phba->fcf.fcf_rr_bmask);
4423
4424 /* Free the ELS sgl list */
4425 lpfc_free_active_sgl(phba);
4426 lpfc_free_sgl_list(phba);
4427
4428 /* Free the SCSI sgl management array */
4429 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4430
4431 /* Free the SLI4 queues */
4432 lpfc_sli4_queue_destroy(phba);
4433
4434 /* Free the completion queue EQ event pool */
4435 lpfc_sli4_cq_event_release_all(phba);
4436 lpfc_sli4_cq_event_pool_destroy(phba);
4437
4438 /* Free the bsmbx region. */
4439 lpfc_destroy_bootstrap_mbox(phba);
4440
4441 /* Free the SLI Layer memory with SLI4 HBAs */
4442 lpfc_mem_free_all(phba);
4443
4444 /* Free the current connect table */
4445 list_for_each_entry_safe(conn_entry, next_conn_entry,
4446 &phba->fcf_conn_rec_list, list) {
4447 list_del_init(&conn_entry->list);
4448 kfree(conn_entry);
4449 }
4450
4451 return;
4452 }
4453
4454 /**
4455 * lpfc_init_api_table_setup - Set up init api function jump table
4456 * @phba: The hba struct for which this call is being executed.
4457 * @dev_grp: The HBA PCI-Device group number.
4458 *
4459 * This routine sets up the device INIT interface API function jump table
4460 * in @phba struct.
4461 *
4462 * Returns: 0 - success, -ENODEV - failure.
4463 **/
4464 int
4465 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4466 {
4467 phba->lpfc_hba_init_link = lpfc_hba_init_link;
4468 phba->lpfc_hba_down_link = lpfc_hba_down_link;
4469 switch (dev_grp) {
4470 case LPFC_PCI_DEV_LP:
4471 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
4472 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
4473 phba->lpfc_stop_port = lpfc_stop_port_s3;
4474 break;
4475 case LPFC_PCI_DEV_OC:
4476 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
4477 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
4478 phba->lpfc_stop_port = lpfc_stop_port_s4;
4479 break;
4480 default:
4481 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4482 "1431 Invalid HBA PCI-device group: 0x%x\n",
4483 dev_grp);
4484 return -ENODEV;
4485 break;
4486 }
4487 return 0;
4488 }
4489
4490 /**
4491 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
4492 * @phba: pointer to lpfc hba data structure.
4493 *
4494 * This routine is invoked to set up the driver internal resources before the
4495 * device specific resource setup to support the HBA device it is attached to.
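 * Phase1 covers state that is common to all SLI revisions: the hba
 * spinlocks, the port, work, scsi-buffer, fabric-iocb, ELS-buffer, and
 * FCF connection record lists, and the worker thread wait queue heads.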
4496 *
4497 * Return codes
4498 * 0 - successful
4499 * other values - error
4500 **/
4501 static int
4502 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
4503 {
4504 /*
4505 * Driver resources common to all SLI revisions
4506 */
4507 atomic_set(&phba->fast_event_count, 0);
4508 spin_lock_init(&phba->hbalock);
4509
4510 /* Initialize ndlp management spinlock */
4511 spin_lock_init(&phba->ndlp_lock);
4512
4513 INIT_LIST_HEAD(&phba->port_list);
4514 INIT_LIST_HEAD(&phba->work_list);
4515 init_waitqueue_head(&phba->wait_4_mlo_m_q);
4516
4517 /* Initialize the wait queue head for the kernel thread */
4518 init_waitqueue_head(&phba->work_waitq);
4519
4520 /* Initialize the scsi buffer list used by driver for scsi IO */
4521 spin_lock_init(&phba->scsi_buf_list_lock);
4522 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
4523
4524 /* Initialize the fabric iocb list */
4525 INIT_LIST_HEAD(&phba->fabric_iocb_list);
4526
4527 /* Initialize list to save ELS buffers */
4528 INIT_LIST_HEAD(&phba->elsbuf);
4529
4530 /* Initialize FCF connection rec list */
4531 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
4532
4533 return 0;
4534 }
4535
4536 /**
4537 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
4538 * @phba: pointer to lpfc hba data structure.
4539 *
4540 * This routine is invoked to set up the driver internal resources after the
4541 * device specific resource setup to support the HBA device it is attached to.
4542 *
4543 * Return codes
4544 * 0 - successful
4545 * other values - error
4546 **/
4547 static int
4548 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
4549 {
4550 int error;
4551
4552 /* Startup the kernel thread for this host adapter. */
4553 phba->worker_thread = kthread_run(lpfc_do_work, phba,
4554 "lpfc_worker_%d", phba->brd_no);
4555 if (IS_ERR(phba->worker_thread)) {
4556 error = PTR_ERR(phba->worker_thread);
4557 return error;
4558 }
4559
4560 return 0;
4561 }
4562
4563 /**
4564 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
4565 * @phba: pointer to lpfc hba data structure.
4566 *
4567 * This routine is invoked to unset the driver internal resources set up after
4568 * the device specific resource setup for supporting the HBA device it is
4569 * attached to.
4570 **/
4571 static void
4572 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
4573 {
4574 /* Stop kernel worker thread */
4575 kthread_stop(phba->worker_thread);
4576 }
4577
4578 /**
4579 * lpfc_free_iocb_list - Free iocb list.
4580 * @phba: pointer to lpfc hba data structure.
4581 *
4582 * This routine is invoked to free the driver's IOCB list and memory.
4583 **/
4584 static void
4585 lpfc_free_iocb_list(struct lpfc_hba *phba)
4586 {
4587 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
4588
4589 spin_lock_irq(&phba->hbalock);
4590 list_for_each_entry_safe(iocbq_entry, iocbq_next,
4591 &phba->lpfc_iocb_list, list) {
4592 list_del(&iocbq_entry->list);
4593 kfree(iocbq_entry);
4594 phba->total_iocbq_bufs--;
4595 }
4596 spin_unlock_irq(&phba->hbalock);
4597
4598 return;
4599 }
4600
4601 /**
4602 * lpfc_init_iocb_list - Allocate and initialize iocb list.
4603 * @phba: pointer to lpfc hba data structure.
 * @iocb_count: number of IOCB entries to pre-allocate.
4604 *
4605 * This routine is invoked to allocate and initialize the driver's IOCB
4606 * list and set up the IOCB tag array accordingly.
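 * Each IOCB is allocated with kzalloc and assigned an iotag via
 * lpfc_sli_next_iotag(); if either step fails, the partially built
 * list is freed and -ENOMEM is returned.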
4607 *
4608 * Return codes
4609 * 0 - successful
4610 * other values - error
4611 **/
4612 static int
4613 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
4614 {
4615 struct lpfc_iocbq *iocbq_entry = NULL;
4616 uint16_t iotag;
4617 int i;
4618
4619 /* Initialize and populate the iocb list per host. */
4620 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
4621 for (i = 0; i < iocb_count; i++) {
4622 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
4623 if (iocbq_entry == NULL) {
4624 printk(KERN_ERR "%s: only allocated %d iocbs of "
4625 "expected %d count. Unloading driver.\n",
4626 __func__, i, LPFC_IOCB_LIST_CNT);
4627 goto out_free_iocbq;
4628 }
4629
4630 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
4631 if (iotag == 0) {
4632 kfree(iocbq_entry);
4633 printk(KERN_ERR "%s: failed to allocate IOTAG. "
4634 "Unloading driver.\n", __func__);
4635 goto out_free_iocbq;
4636 }
4637 iocbq_entry->sli4_xritag = NO_XRI;
4638
4639 spin_lock_irq(&phba->hbalock);
4640 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
4641 phba->total_iocbq_bufs++;
4642 spin_unlock_irq(&phba->hbalock);
4643 }
4644
4645 return 0;
4646
4647 out_free_iocbq:
4648 lpfc_free_iocb_list(phba);
4649
4650 return -ENOMEM;
4651 }
4652
4653 /**
4654 * lpfc_free_sgl_list - Free sgl list.
4655 * @phba: pointer to lpfc hba data structure.
4656 *
4657 * This routine is invoked to free the driver's sgl list and memory.
4658 **/
4659 static void
4660 lpfc_free_sgl_list(struct lpfc_hba *phba)
4661 {
4662 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
4663 LIST_HEAD(sglq_list);
4664
4665 spin_lock_irq(&phba->hbalock);
4666 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
4667 spin_unlock_irq(&phba->hbalock);
4668
4669 list_for_each_entry_safe(sglq_entry, sglq_next,
4670 &sglq_list, list) {
4671 list_del(&sglq_entry->list);
4672 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
4673 kfree(sglq_entry);
4674 phba->sli4_hba.total_sglq_bufs--;
4675 }
4676 kfree(phba->sli4_hba.lpfc_els_sgl_array);
4677 }
4678
4679 /**
4680 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
4681 * @phba: pointer to lpfc hba data structure.
4682 *
4683 * This routine is invoked to allocate the driver's active sgl memory.
4684 * This array will hold the sglq_entry's for active IOs.
4685 **/
4686 static int
4687 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
4688 {
4689 int size;
4690 size = sizeof(struct lpfc_sglq *);
4691 size *= phba->sli4_hba.max_cfg_param.max_xri;
4692
4693 phba->sli4_hba.lpfc_sglq_active_list =
4694 kzalloc(size, GFP_KERNEL);
4695 if (!phba->sli4_hba.lpfc_sglq_active_list)
4696 return -ENOMEM;
4697 return 0;
4698 }
4699
4700 /**
4701 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
4702 * @phba: pointer to lpfc hba data structure.
4703 *
4704 * This routine is invoked to walk through the array of active sglq entries
4705 * and free all of the resources.
4706 * This is just a place holder for now.
4707 **/
4708 static void
4709 lpfc_free_active_sgl(struct lpfc_hba *phba)
4710 {
4711 kfree(phba->sli4_hba.lpfc_sglq_active_list);
4712 }
4713
4714 /**
4715 * lpfc_init_sgl_list - Allocate and initialize sgl list.
4716 * @phba: pointer to lpfc hba data structure.
4717 *
4718 * This routine is invoked to allocate and initialize the driver's sgl
4719 * list and set up the sgl xritag array accordingly.
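 * The ELS XRIs are carved out first: els_xri_cnt sglq entries are
 * reserved for ELS traffic and the remainder of max_xri is left for
 * SCSI buffers as scsi_xri_max.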
4720 *
4721 * Return codes
4722 * 0 - successful
4723 * other values - error
4724 **/
4725 static int
4726 lpfc_init_sgl_list(struct lpfc_hba *phba)
4727 {
4728 struct lpfc_sglq *sglq_entry = NULL;
4729 int i;
4730 int els_xri_cnt;
4731
4732 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4733 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4734 "2400 lpfc_init_sgl_list els %d.\n",
4735 els_xri_cnt);
4736 /* Initialize and populate the sglq list per host/VF. */
4737 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
4738 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
4739
4740 /* Sanity check on XRI management */
4741 if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
4742 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4743 "2562 No room left for SCSI XRI allocation: "
4744 "max_xri=%d, els_xri=%d\n",
4745 phba->sli4_hba.max_cfg_param.max_xri,
4746 els_xri_cnt);
4747 return -ENOMEM;
4748 }
4749
4750 /* Allocate memory for the ELS XRI management array */
4751 phba->sli4_hba.lpfc_els_sgl_array =
4752 kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
4753 GFP_KERNEL);
4754
4755 if (!phba->sli4_hba.lpfc_els_sgl_array) {
4756 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4757 "2401 Failed to allocate memory for ELS "
4758 "XRI management array of size %d.\n",
4759 els_xri_cnt);
4760 return -ENOMEM;
4761 }
4762
4763 /* Keep the SCSI XRI into the XRI management array */
4764 phba->sli4_hba.scsi_xri_max =
4765 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4766 phba->sli4_hba.scsi_xri_cnt = 0;
4767
4768 phba->sli4_hba.lpfc_scsi_psb_array =
4769 kzalloc((sizeof(struct lpfc_scsi_buf *) *
4770 phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
4771
4772 if (!phba->sli4_hba.lpfc_scsi_psb_array) {
4773 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4774 "2563 Failed to allocate memory for SCSI "
4775 "XRI management array of size %d.\n",
4776 phba->sli4_hba.scsi_xri_max);
4777 kfree(phba->sli4_hba.lpfc_els_sgl_array);
4778 return -ENOMEM;
4779 }
4780
4781 for (i = 0; i < els_xri_cnt; i++) {
4782 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
4783 if (sglq_entry == NULL) {
4784 printk(KERN_ERR "%s: only allocated %d sgls of "
4785 "expected %d count. Unloading driver.\n",
4786 __func__, i, els_xri_cnt);
4787 goto out_free_mem;
4788 }
4789
4790 sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
4791 if (sglq_entry->sli4_xritag == NO_XRI) {
4792 kfree(sglq_entry);
4793 printk(KERN_ERR "%s: failed to allocate XRI.\n"
4794 "Unloading driver.\n", __func__);
4795 goto out_free_mem;
4796 }
4797 sglq_entry->buff_type = GEN_BUFF_TYPE;
4798 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
4799 if (sglq_entry->virt == NULL) {
4800 kfree(sglq_entry);
4801 printk(KERN_ERR "%s: failed to allocate mbuf.\n"
4802 "Unloading driver.\n", __func__);
4803 goto out_free_mem;
4804 }
4805 sglq_entry->sgl = sglq_entry->virt;
4806 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
4807
4808 /* The list order is used by later block SGL registration */
4809 spin_lock_irq(&phba->hbalock);
4810 sglq_entry->state = SGL_FREED;
4811 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
4812 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
4813 phba->sli4_hba.total_sglq_bufs++;
4814 spin_unlock_irq(&phba->hbalock);
4815 }
4816 return 0;
4817
4818 out_free_mem:
4819 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4820 lpfc_free_sgl_list(phba);
4821 return -ENOMEM;
4822 }
4823
4824 /**
4825 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
4826 * @phba: pointer to lpfc hba data structure.
4827 *
4828 * This routine is invoked to post rpi header templates to the
4829 * HBA consistent with the SLI-4 interface spec. This routine
4830 * posts a PAGE_SIZE memory region to the port to hold up to
4831 * PAGE_SIZE / 64 rpi context headers.
4832 * No locks are held here because this is an initialization routine
4833 * called only from probe or lpfc_online when interrupts are not
4834 * enabled and the driver is reinitializing the device.
4835 *
4836 * Return codes
4837 * 0 - successful
4838 * -ENOMEM - No available memory
4839 * -EIO - The mailbox failed to complete successfully.
4840 **/
4841 int
4842 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
4843 {
4844 int rc = 0;
4845 int longs;
4846 uint16_t rpi_count;
4847 struct lpfc_rpi_hdr *rpi_hdr;
4848
4849 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
4850
4851 /*
4852 * Provision an rpi bitmask range for discovery. The bitmask
4853 * must cover rpi values up to rpi_base + max_rpi - 1.
4854 */
4855 rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
4856 phba->sli4_hba.max_cfg_param.max_rpi - 1;
4857
4858 longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
4859 phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
4860 GFP_KERNEL);
4861 if (!phba->sli4_hba.rpi_bmask)
4862 return -ENOMEM;
4863
4864 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
4865 if (!rpi_hdr) {
4866 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4867 "0391 Error during rpi post operation\n");
4868 lpfc_sli4_remove_rpis(phba);
4869 rc = -ENODEV;
4870 }
4871
4872 return rc;
4873 }
4874
4875 /**
4876 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
4877 * @phba: pointer to lpfc hba data structure.
4878 *
4879 * This routine is invoked to allocate a single 4KB memory region to
4880 * support rpis and stores them in the phba. This single region
4881 * provides support for up to 64 rpis. The region is used globally
4882 * by the device.
4883 *
4884 * Returns:
4885 * A valid rpi hdr on success.
4886 * A NULL pointer on any failure.
4887 **/
4888 struct lpfc_rpi_hdr *
4889 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4890 {
4891 uint16_t rpi_limit, curr_rpi_range;
4892 struct lpfc_dmabuf *dmabuf;
4893 struct lpfc_rpi_hdr *rpi_hdr;
4894
4895 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4896 phba->sli4_hba.max_cfg_param.max_rpi - 1;
4897
4898 spin_lock_irq(&phba->hbalock);
4899 curr_rpi_range = phba->sli4_hba.next_rpi;
4900 spin_unlock_irq(&phba->hbalock);
4901
4902 /*
4903 * The port has a limited number of rpis. The increment here
4904 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
4905 * and to allow the full max_rpi range per port.
4906 */
4907 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
4908 return NULL;
4909
4910 /*
4911 * First allocate the protocol header region for the port. The
4912 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
4913 */
4914 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4915 if (!dmabuf)
4916 return NULL;
4917
4918 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4919 LPFC_HDR_TEMPLATE_SIZE,
4920 &dmabuf->phys,
4921 GFP_KERNEL);
4922 if (!dmabuf->virt) {
4923 rpi_hdr = NULL;
4924 goto err_free_dmabuf;
4925 }
4926
4927 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
4928 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
4929 rpi_hdr = NULL;
4930 goto err_free_coherent;
4931 }
4932
4933 /* Save the rpi header data for cleanup later.
*/ 4934 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 4935 if (!rpi_hdr) 4936 goto err_free_coherent; 4937 4938 rpi_hdr->dmabuf = dmabuf; 4939 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 4940 rpi_hdr->page_count = 1; 4941 spin_lock_irq(&phba->hbalock); 4942 rpi_hdr->start_rpi = phba->sli4_hba.next_rpi; 4943 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 4944 4945 /* 4946 * The next_rpi stores the next module-64 rpi value to post 4947 * in any subsequent rpi memory region postings. 4948 */ 4949 phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT; 4950 spin_unlock_irq(&phba->hbalock); 4951 return rpi_hdr; 4952 4953 err_free_coherent: 4954 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 4955 dmabuf->virt, dmabuf->phys); 4956 err_free_dmabuf: 4957 kfree(dmabuf); 4958 return NULL; 4959 } 4960 4961 /** 4962 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 4963 * @phba: pointer to lpfc hba data structure. 4964 * 4965 * This routine is invoked to remove all memory resources allocated 4966 * to support rpis. This routine presumes the caller has released all 4967 * rpis consumed by fabric or port logins and is prepared to have 4968 * the header pages removed. 4969 **/ 4970 void 4971 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 4972 { 4973 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 4974 4975 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 4976 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 4977 list_del(&rpi_hdr->list); 4978 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 4979 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 4980 kfree(rpi_hdr->dmabuf); 4981 kfree(rpi_hdr); 4982 } 4983 4984 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; 4985 memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask)); 4986 } 4987 4988 /** 4989 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 4990 * @pdev: pointer to pci device data structure. 4991 * 4992 * This routine is invoked to allocate the driver hba data structure for an 4993 * HBA device. If the allocation is successful, the phba reference to the 4994 * PCI device data structure is set. 4995 * 4996 * Return codes 4997 * pointer to @phba - successful 4998 * NULL - error 4999 **/ 5000 static struct lpfc_hba * 5001 lpfc_hba_alloc(struct pci_dev *pdev) 5002 { 5003 struct lpfc_hba *phba; 5004 5005 /* Allocate memory for HBA structure */ 5006 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 5007 if (!phba) { 5008 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 5009 return NULL; 5010 } 5011 5012 /* Set reference to PCI device in HBA structure */ 5013 phba->pcidev = pdev; 5014 5015 /* Assign an unused board number */ 5016 phba->brd_no = lpfc_get_instance(); 5017 if (phba->brd_no < 0) { 5018 kfree(phba); 5019 return NULL; 5020 } 5021 5022 spin_lock_init(&phba->ct_ev_lock); 5023 INIT_LIST_HEAD(&phba->ct_ev_waiters); 5024 5025 return phba; 5026 } 5027 5028 /** 5029 * lpfc_hba_free - Free driver hba data structure with a device. 5030 * @phba: pointer to lpfc hba data structure. 5031 * 5032 * This routine is invoked to free the driver hba data structure with an 5033 * HBA device. 5034 **/ 5035 static void 5036 lpfc_hba_free(struct lpfc_hba *phba) 5037 { 5038 /* Release the driver assigned board number */ 5039 idr_remove(&lpfc_hba_index, phba->brd_no); 5040 5041 kfree(phba); 5042 return; 5043 } 5044 5045 /** 5046 * lpfc_create_shost - Create hba physical port with associated scsi host. 5047 * @phba: pointer to lpfc hba data structure. 
5048 * 5049 * This routine is invoked to create HBA physical port and associate a SCSI 5050 * host with it. 5051 * 5052 * Return codes 5053 * 0 - successful 5054 * other values - error 5055 **/ 5056 static int 5057 lpfc_create_shost(struct lpfc_hba *phba) 5058 { 5059 struct lpfc_vport *vport; 5060 struct Scsi_Host *shost; 5061 5062 /* Initialize HBA FC structure */ 5063 phba->fc_edtov = FF_DEF_EDTOV; 5064 phba->fc_ratov = FF_DEF_RATOV; 5065 phba->fc_altov = FF_DEF_ALTOV; 5066 phba->fc_arbtov = FF_DEF_ARBTOV; 5067 5068 atomic_set(&phba->sdev_cnt, 0); 5069 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 5070 if (!vport) 5071 return -ENODEV; 5072 5073 shost = lpfc_shost_from_vport(vport); 5074 phba->pport = vport; 5075 lpfc_debugfs_initialize(vport); 5076 /* Put reference to SCSI host to driver's device private data */ 5077 pci_set_drvdata(phba->pcidev, shost); 5078 5079 return 0; 5080 } 5081 5082 /** 5083 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 5084 * @phba: pointer to lpfc hba data structure. 5085 * 5086 * This routine is invoked to destroy HBA physical port and the associated 5087 * SCSI host. 5088 **/ 5089 static void 5090 lpfc_destroy_shost(struct lpfc_hba *phba) 5091 { 5092 struct lpfc_vport *vport = phba->pport; 5093 5094 /* Destroy physical port that associated with the SCSI host */ 5095 destroy_port(vport); 5096 5097 return; 5098 } 5099 5100 /** 5101 * lpfc_setup_bg - Setup Block guard structures and debug areas. 5102 * @phba: pointer to lpfc hba data structure. 5103 * @shost: the shost to be used to detect Block guard settings. 5104 * 5105 * This routine sets up the local Block guard protocol settings for @shost. 5106 * This routine also allocates memory for debugging bg buffers. 5107 **/ 5108 static void 5109 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 5110 { 5111 int pagecnt = 10; 5112 if (lpfc_prot_mask && lpfc_prot_guard) { 5113 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5114 "1478 Registering BlockGuard with the " 5115 "SCSI layer\n"); 5116 scsi_host_set_prot(shost, lpfc_prot_mask); 5117 scsi_host_set_guard(shost, lpfc_prot_guard); 5118 } 5119 if (!_dump_buf_data) { 5120 while (pagecnt) { 5121 spin_lock_init(&_dump_buf_lock); 5122 _dump_buf_data = 5123 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 5124 if (_dump_buf_data) { 5125 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5126 "9043 BLKGRD: allocated %d pages for " 5127 "_dump_buf_data at 0x%p\n", 5128 (1 << pagecnt), _dump_buf_data); 5129 _dump_buf_data_order = pagecnt; 5130 memset(_dump_buf_data, 0, 5131 ((1 << PAGE_SHIFT) << pagecnt)); 5132 break; 5133 } else 5134 --pagecnt; 5135 } 5136 if (!_dump_buf_data_order) 5137 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5138 "9044 BLKGRD: ERROR unable to allocate " 5139 "memory for hexdump\n"); 5140 } else 5141 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5142 "9045 BLKGRD: already allocated _dump_buf_data=0x%p" 5143 "\n", _dump_buf_data); 5144 if (!_dump_buf_dif) { 5145 while (pagecnt) { 5146 _dump_buf_dif = 5147 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 5148 if (_dump_buf_dif) { 5149 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5150 "9046 BLKGRD: allocated %d pages for " 5151 "_dump_buf_dif at 0x%p\n", 5152 (1 << pagecnt), _dump_buf_dif); 5153 _dump_buf_dif_order = pagecnt; 5154 memset(_dump_buf_dif, 0, 5155 ((1 << PAGE_SHIFT) << pagecnt)); 5156 break; 5157 } else 5158 --pagecnt; 5159 } 5160 if (!_dump_buf_dif_order) 5161 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5162 "9047 BLKGRD: ERROR unable to allocate " 5163 "memory 
for hexdump\n"); 5164 } else 5165 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 5166 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n", 5167 _dump_buf_dif); 5168 } 5169 5170 /** 5171 * lpfc_post_init_setup - Perform necessary device post initialization setup. 5172 * @phba: pointer to lpfc hba data structure. 5173 * 5174 * This routine is invoked to perform all the necessary post initialization 5175 * setup for the device. 5176 **/ 5177 static void 5178 lpfc_post_init_setup(struct lpfc_hba *phba) 5179 { 5180 struct Scsi_Host *shost; 5181 struct lpfc_adapter_event_header adapter_event; 5182 5183 /* Get the default values for Model Name and Description */ 5184 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 5185 5186 /* 5187 * hba setup may have changed the hba_queue_depth so we need to 5188 * adjust the value of can_queue. 5189 */ 5190 shost = pci_get_drvdata(phba->pcidev); 5191 shost->can_queue = phba->cfg_hba_queue_depth - 10; 5192 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 5193 lpfc_setup_bg(phba, shost); 5194 5195 lpfc_host_attrib_init(shost); 5196 5197 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 5198 spin_lock_irq(shost->host_lock); 5199 lpfc_poll_start_timer(phba); 5200 spin_unlock_irq(shost->host_lock); 5201 } 5202 5203 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5204 "0428 Perform SCSI scan\n"); 5205 /* Send board arrival event to upper layer */ 5206 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 5207 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 5208 fc_host_post_vendor_event(shost, fc_get_event_number(), 5209 sizeof(adapter_event), 5210 (char *) &adapter_event, 5211 LPFC_NL_VENDOR_ID); 5212 return; 5213 } 5214 5215 /** 5216 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 5217 * @phba: pointer to lpfc hba data structure. 5218 * 5219 * This routine is invoked to set up the PCI device memory space for device 5220 * with SLI-3 interface spec. 5221 * 5222 * Return codes 5223 * 0 - successful 5224 * other values - error 5225 **/ 5226 static int 5227 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 5228 { 5229 struct pci_dev *pdev; 5230 unsigned long bar0map_len, bar2map_len; 5231 int i, hbq_count; 5232 void *ptr; 5233 int error = -ENODEV; 5234 5235 /* Obtain PCI device reference */ 5236 if (!phba->pcidev) 5237 return error; 5238 else 5239 pdev = phba->pcidev; 5240 5241 /* Set the device DMA mask size */ 5242 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 5243 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 5244 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 5245 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 5246 return error; 5247 } 5248 } 5249 5250 /* Get the bus address of Bar0 and Bar2 and the number of bytes 5251 * required by each mapping. 5252 */ 5253 phba->pci_bar0_map = pci_resource_start(pdev, 0); 5254 bar0map_len = pci_resource_len(pdev, 0); 5255 5256 phba->pci_bar2_map = pci_resource_start(pdev, 2); 5257 bar2map_len = pci_resource_len(pdev, 2); 5258 5259 /* Map HBA SLIM to a kernel virtual address. */ 5260 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 5261 if (!phba->slim_memmap_p) { 5262 dev_printk(KERN_ERR, &pdev->dev, 5263 "ioremap failed for SLIM memory.\n"); 5264 goto out; 5265 } 5266 5267 /* Map HBA Control Registers to a kernel virtual address. 
*/ 5268 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 5269 if (!phba->ctrl_regs_memmap_p) { 5270 dev_printk(KERN_ERR, &pdev->dev, 5271 "ioremap failed for HBA control registers.\n"); 5272 goto out_iounmap_slim; 5273 } 5274 5275 /* Allocate memory for SLI-2 structures */ 5276 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, 5277 SLI2_SLIM_SIZE, 5278 &phba->slim2p.phys, 5279 GFP_KERNEL); 5280 if (!phba->slim2p.virt) 5281 goto out_iounmap; 5282 5283 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); 5284 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 5285 phba->mbox_ext = (phba->slim2p.virt + 5286 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 5287 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 5288 phba->IOCBs = (phba->slim2p.virt + 5289 offsetof(struct lpfc_sli2_slim, IOCBs)); 5290 5291 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 5292 lpfc_sli_hbq_size(), 5293 &phba->hbqslimp.phys, 5294 GFP_KERNEL); 5295 if (!phba->hbqslimp.virt) 5296 goto out_free_slim; 5297 5298 hbq_count = lpfc_sli_hbq_count(); 5299 ptr = phba->hbqslimp.virt; 5300 for (i = 0; i < hbq_count; ++i) { 5301 phba->hbqs[i].hbq_virt = ptr; 5302 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 5303 ptr += (lpfc_hbq_defs[i]->entry_count * 5304 sizeof(struct lpfc_hbq_entry)); 5305 } 5306 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 5307 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 5308 5309 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 5310 5311 INIT_LIST_HEAD(&phba->rb_pend_list); 5312 5313 phba->MBslimaddr = phba->slim_memmap_p; 5314 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 5315 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 5316 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 5317 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 5318 5319 return 0; 5320 5321 out_free_slim: 5322 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 5323 phba->slim2p.virt, phba->slim2p.phys); 5324 out_iounmap: 5325 iounmap(phba->ctrl_regs_memmap_p); 5326 out_iounmap_slim: 5327 iounmap(phba->slim_memmap_p); 5328 out: 5329 return error; 5330 } 5331 5332 /** 5333 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 5334 * @phba: pointer to lpfc hba data structure. 5335 * 5336 * This routine is invoked to unset the PCI device memory space for device 5337 * with SLI-3 interface spec. 5338 **/ 5339 static void 5340 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) 5341 { 5342 struct pci_dev *pdev; 5343 5344 /* Obtain PCI device reference */ 5345 if (!phba->pcidev) 5346 return; 5347 else 5348 pdev = phba->pcidev; 5349 5350 /* Free coherent DMA memory allocated */ 5351 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 5352 phba->hbqslimp.virt, phba->hbqslimp.phys); 5353 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 5354 phba->slim2p.virt, phba->slim2p.phys); 5355 5356 /* I/O memory unmap */ 5357 iounmap(phba->ctrl_regs_memmap_p); 5358 iounmap(phba->slim_memmap_p); 5359 5360 return; 5361 } 5362 5363 /** 5364 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status 5365 * @phba: pointer to lpfc hba data structure. 5366 * 5367 * This routine is invoked to wait for SLI4 device Power On Self Test (POST) 5368 * done and check status. 5369 * 5370 * Return 0 if successful, otherwise -ENODEV. 
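 *
 * The port status semaphore register is polled every 10ms for up to
 * 3000 iterations (about 30 seconds) before the POST wait is
 * abandoned.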
5371 **/ 5372 int 5373 lpfc_sli4_post_status_check(struct lpfc_hba *phba) 5374 { 5375 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg; 5376 struct lpfc_register reg_data; 5377 int i, port_error = 0; 5378 uint32_t if_type; 5379 5380 if (!phba->sli4_hba.PSMPHRregaddr) 5381 return -ENODEV; 5382 5383 /* Wait up to 30 seconds for the SLI Port POST done and ready */ 5384 for (i = 0; i < 3000; i++) { 5385 portsmphr_reg.word0 = readl(phba->sli4_hba.PSMPHRregaddr); 5386 if (bf_get(lpfc_port_smphr_perr, &portsmphr_reg)) { 5387 /* Port has a fatal POST error, break out */ 5388 port_error = -ENODEV; 5389 break; 5390 } 5391 if (LPFC_POST_STAGE_PORT_READY == 5392 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)) 5393 break; 5394 msleep(10); 5395 } 5396 5397 /* 5398 * If there was a port error during POST, then don't proceed with 5399 * other register reads as the data may not be valid. Just exit. 5400 */ 5401 if (port_error) { 5402 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5403 "1408 Port Failed POST - portsmphr=0x%x, " 5404 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, " 5405 "scr2=x%x, hscratch=x%x, pstatus=x%x\n", 5406 portsmphr_reg.word0, 5407 bf_get(lpfc_port_smphr_perr, &portsmphr_reg), 5408 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg), 5409 bf_get(lpfc_port_smphr_nip, &portsmphr_reg), 5410 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg), 5411 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg), 5412 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg), 5413 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg), 5414 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)); 5415 } else { 5416 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5417 "2534 Device Info: SLIFamily=0x%x, " 5418 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, " 5419 "SLIHint_2=0x%x, FT=0x%x\n", 5420 bf_get(lpfc_sli_intf_sli_family, 5421 &phba->sli4_hba.sli_intf), 5422 bf_get(lpfc_sli_intf_slirev, 5423 &phba->sli4_hba.sli_intf), 5424 bf_get(lpfc_sli_intf_if_type, 5425 &phba->sli4_hba.sli_intf), 5426 bf_get(lpfc_sli_intf_sli_hint1, 5427 &phba->sli4_hba.sli_intf), 5428 bf_get(lpfc_sli_intf_sli_hint2, 5429 &phba->sli4_hba.sli_intf), 5430 bf_get(lpfc_sli_intf_func_type, 5431 &phba->sli4_hba.sli_intf)); 5432 /* 5433 * Check for other Port errors during the initialization 5434 * process. Fail the load if the port did not come up 5435 * correctly. 5436 */ 5437 if_type = bf_get(lpfc_sli_intf_if_type, 5438 &phba->sli4_hba.sli_intf); 5439 switch (if_type) { 5440 case LPFC_SLI_INTF_IF_TYPE_0: 5441 phba->sli4_hba.ue_mask_lo = 5442 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr); 5443 phba->sli4_hba.ue_mask_hi = 5444 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr); 5445 uerrlo_reg.word0 = 5446 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); 5447 uerrhi_reg.word0 = 5448 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); 5449 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || 5450 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { 5451 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5452 "1422 Unrecoverable Error " 5453 "Detected during POST " 5454 "uerr_lo_reg=0x%x, " 5455 "uerr_hi_reg=0x%x, " 5456 "ue_mask_lo_reg=0x%x, " 5457 "ue_mask_hi_reg=0x%x\n", 5458 uerrlo_reg.word0, 5459 uerrhi_reg.word0, 5460 phba->sli4_hba.ue_mask_lo, 5461 phba->sli4_hba.ue_mask_hi); 5462 port_error = -ENODEV; 5463 } 5464 break; 5465 case LPFC_SLI_INTF_IF_TYPE_2: 5466 /* Final checks. The port status should be clean. 
*/
5467 reg_data.word0 =
5468 readl(phba->sli4_hba.u.if_type2.STATUSregaddr);
5469 if (bf_get(lpfc_sliport_status_err, &reg_data)) {
5470 phba->work_status[0] =
5471 readl(phba->sli4_hba.u.if_type2.
5472 ERR1regaddr);
5473 phba->work_status[1] =
5474 readl(phba->sli4_hba.u.if_type2.
5475 ERR2regaddr);
5476 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5477 "2888 Port Error Detected "
5478 "during POST: "
5479 "port status reg 0x%x, "
5480 "port_smphr reg 0x%x, "
5481 "error 1=0x%x, error 2=0x%x\n",
5482 reg_data.word0,
5483 portsmphr_reg.word0,
5484 phba->work_status[0],
5485 phba->work_status[1]);
5486 port_error = -ENODEV;
5487 }
5488 break;
5489 case LPFC_SLI_INTF_IF_TYPE_1:
5490 default:
5491 break;
5492 }
5493 }
5494 return port_error;
5495 }
5496
5497 /**
5498 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
5499 * @phba: pointer to lpfc hba data structure.
5500 * @if_type: The SLI4 interface type getting configured.
5501 *
5502 * This routine is invoked to set up SLI4 BAR0 PCI config space register
5503 * memory map.
5504 **/
5505 static void
5506 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
5507 {
5508 switch (if_type) {
5509 case LPFC_SLI_INTF_IF_TYPE_0:
5510 phba->sli4_hba.u.if_type0.UERRLOregaddr =
5511 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
5512 phba->sli4_hba.u.if_type0.UERRHIregaddr =
5513 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
5514 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
5515 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
5516 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
5517 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
5518 phba->sli4_hba.SLIINTFregaddr =
5519 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
5520 break;
5521 case LPFC_SLI_INTF_IF_TYPE_2:
5522 phba->sli4_hba.u.if_type2.ERR1regaddr =
5523 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_1;
5524 phba->sli4_hba.u.if_type2.ERR2regaddr =
5525 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_2;
5526 phba->sli4_hba.u.if_type2.CTRLregaddr =
5527 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_CNTRL;
5528 phba->sli4_hba.u.if_type2.STATUSregaddr =
5529 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_STATUS;
5530 phba->sli4_hba.SLIINTFregaddr =
5531 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
5532 phba->sli4_hba.PSMPHRregaddr =
5533 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_IF2_SMPHR;
5534 phba->sli4_hba.RQDBregaddr =
5535 phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL;
5536 phba->sli4_hba.WQDBregaddr =
5537 phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL;
5538 phba->sli4_hba.EQCQDBregaddr =
5539 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
5540 phba->sli4_hba.MQDBregaddr =
5541 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
5542 phba->sli4_hba.BMBXregaddr =
5543 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
5544 break;
5545 case LPFC_SLI_INTF_IF_TYPE_1:
5546 default:
5547 dev_printk(KERN_ERR, &phba->pcidev->dev,
5548 "FATAL - unsupported SLI4 interface type - %d\n",
5549 if_type);
5550 break;
5551 }
5552 }
5553
5554 /**
5555 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
5556 * @phba: pointer to lpfc hba data structure.
5557 *
5558 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
5559 * memory map.
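 *
 * For if_type 0 the BAR1 (CSR) region carries the host interrupt status,
 * mask and clear registers (LPFC_HST_ISR0/IMR0/ISCR0) and the if_type 0
 * port semaphore register polled during POST, as mapped below.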
5560 **/
5561 static void
5562 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
5563 {
5564 phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5565 LPFC_SLIPORT_IF0_SMPHR;
5566 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5567 LPFC_HST_ISR0;
5568 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5569 LPFC_HST_IMR0;
5570 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5571 LPFC_HST_ISCR0;
5572 }
5573
5574 /**
5575 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
5576 * @phba: pointer to lpfc hba data structure.
5577 * @vf: virtual function number
5578 *
5579 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
5580 * based on the given virtual function number, @vf.
5581 *
5582 * Return 0 if successful, otherwise -ENODEV.
5583 **/
5584 static int
5585 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
5586 {
5587 if (vf > LPFC_VIR_FUNC_MAX)
5588 return -ENODEV;
5589
5590 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5591 vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
5592 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5593 vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
5594 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5595 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
5596 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5597 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
5598 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5599 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
5600 return 0;
5601 }
5602
5603 /**
5604 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
5605 * @phba: pointer to lpfc hba data structure.
5606 *
5607 * This routine is invoked to create the bootstrap mailbox
5608 * region consistent with the SLI-4 interface spec. This
5609 * routine allocates all memory necessary to communicate
5610 * mailbox commands to the port and sets up all alignment
5611 * needs. No locks are expected to be held when calling
5612 * this routine.
5613 *
5614 * Return codes
5615 * 0 - successful
5616 * -ENOMEM - could not allocate memory.
5617 **/
5618 static int
5619 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
5620 {
5621 uint32_t bmbx_size;
5622 struct lpfc_dmabuf *dmabuf;
5623 struct dma_address *dma_address;
5624 uint32_t pa_addr;
5625 uint64_t phys_addr;
5626
5627 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5628 if (!dmabuf)
5629 return -ENOMEM;
5630
5631 /*
5632 * The bootstrap mailbox region is comprised of 2 parts
5633 * plus an alignment restriction of 16 bytes.
5634 */
5635 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
5636 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5637 bmbx_size,
5638 &dmabuf->phys,
5639 GFP_KERNEL);
5640 if (!dmabuf->virt) {
5641 kfree(dmabuf);
5642 return -ENOMEM;
5643 }
5644 memset(dmabuf->virt, 0, bmbx_size);
5645
5646 /*
5647 * Initialize the bootstrap mailbox pointers now so that the register
5648 * operations are simple later. The mailbox dma address is required
5649 * to be 16-byte aligned. Also align the virtual memory as each
5650 * mailbox is copied into the bmbx mailbox region before issuing the
5651 * command to the port.
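 *
 * Note on the address split performed just below: the port takes the
 * 16-byte aligned physical address as two 30-bit halves, addr_lo from
 * bits 33:4 ((aphys >> 4) & 0x3fffffff) and addr_hi from bits 63:34
 * ((aphys >> 34) & 0x3fffffff), each shifted left by 2 and tagged with
 * its LPFC_BMBX_BIT1_ADDR_* marker bit.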
*/
5653 phba->sli4_hba.bmbx.dmabuf = dmabuf;
5654 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
5655
5656 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
5657 LPFC_ALIGN_16_BYTE);
5658 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
5659 LPFC_ALIGN_16_BYTE);
5660
5661 /*
5662 * Set the high and low physical addresses now. The SLI4 alignment
5663 * requirement is 16 bytes and the mailbox is posted to the port
5664 * as two 30-bit addresses. The other data is a bit marking whether
5665 * the 30-bit address is the high or low address.
5666 * Upcast bmbx aphys to 64bits so shift instruction compiles
5667 * clean on 32 bit machines.
5668 */
5669 dma_address = &phba->sli4_hba.bmbx.dma_address;
5670 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
5671 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
5672 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
5673 LPFC_BMBX_BIT1_ADDR_HI);
5674
5675 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
5676 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
5677 LPFC_BMBX_BIT1_ADDR_LO);
5678 return 0;
5679 }
5680
5681 /**
5682 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
5683 * @phba: pointer to lpfc hba data structure.
5684 *
5685 * This routine is invoked to teardown the bootstrap mailbox
5686 * region and release all host resources. This routine requires
5687 * the caller to ensure all mailbox commands are recovered, no
5688 * additional mailbox commands are sent, and interrupts are disabled
5689 * before calling this routine.
5690 *
5691 **/
5692 static void
5693 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
5694 {
5695 dma_free_coherent(&phba->pcidev->dev,
5696 phba->sli4_hba.bmbx.bmbx_size,
5697 phba->sli4_hba.bmbx.dmabuf->virt,
5698 phba->sli4_hba.bmbx.dmabuf->phys);
5699
5700 kfree(phba->sli4_hba.bmbx.dmabuf);
5701 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
5702 }
5703
5704 /**
5705 * lpfc_sli4_read_config - Get the config parameters.
5706 * @phba: pointer to lpfc hba data structure.
5707 *
5708 * This routine is invoked to read the configuration parameters from the HBA.
5709 * The configuration parameters are used to set the base and maximum values
5710 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
5711 * allocation for the port.
5712 *
5713 * Return codes
5714 * 0 - successful
5715 * -ENOMEM - No available memory
5716 * -EIO - The mailbox failed to complete successfully.
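 *
 * As a usage note, the VPI range read here bounds vport creation:
 * max_vports is derived below as max_vpi - 1 (when max_vpi is non-zero),
 * with the remaining VPI implicitly left to the physical port.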
5717 **/
5718 static int
5719 lpfc_sli4_read_config(struct lpfc_hba *phba)
5720 {
5721 LPFC_MBOXQ_t *pmb;
5722 struct lpfc_mbx_read_config *rd_config;
5723 uint32_t rc = 0;
5724
5725 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5726 if (!pmb) {
5727 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5728 "2011 Unable to allocate memory for issuing "
5729 "SLI_CONFIG_SPECIAL mailbox command\n");
5730 return -ENOMEM;
5731 }
5732
5733 lpfc_read_config(phba, pmb);
5734
5735 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5736 if (rc != MBX_SUCCESS) {
5737 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5738 "2012 Mailbox failed, mbxCmd x%x "
5739 "READ_CONFIG, mbxStatus x%x\n",
5740 bf_get(lpfc_mqe_command, &pmb->u.mqe),
5741 bf_get(lpfc_mqe_status, &pmb->u.mqe));
5742 rc = -EIO;
5743 } else {
5744 rd_config = &pmb->u.mqe.un.rd_config;
5745 phba->sli4_hba.max_cfg_param.max_xri =
5746 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
5747 phba->sli4_hba.max_cfg_param.xri_base =
5748 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
5749 phba->sli4_hba.max_cfg_param.max_vpi =
5750 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
5751 phba->sli4_hba.max_cfg_param.vpi_base =
5752 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
5753 phba->sli4_hba.max_cfg_param.max_rpi =
5754 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
5755 phba->sli4_hba.max_cfg_param.rpi_base =
5756 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
5757 phba->sli4_hba.max_cfg_param.max_vfi =
5758 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
5759 phba->sli4_hba.max_cfg_param.vfi_base =
5760 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
5761 phba->sli4_hba.max_cfg_param.max_fcfi =
5762 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
5763 phba->sli4_hba.max_cfg_param.fcfi_base =
5764 bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
5765 phba->sli4_hba.max_cfg_param.max_eq =
5766 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
5767 phba->sli4_hba.max_cfg_param.max_rq =
5768 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
5769 phba->sli4_hba.max_cfg_param.max_wq =
5770 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
5771 phba->sli4_hba.max_cfg_param.max_cq =
5772 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
5773 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
5774 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
5775 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
5776 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
5777 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
5778 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
5780 phba->max_vports = phba->max_vpi;
5781 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5782 "2003 cfg params XRI(B:%d M:%d), "
5783 "VPI(B:%d M:%d) "
5784 "VFI(B:%d M:%d) "
5785 "RPI(B:%d M:%d) "
5786 "FCFI(B:%d M:%d)\n",
5787 phba->sli4_hba.max_cfg_param.xri_base,
5788 phba->sli4_hba.max_cfg_param.max_xri,
5789 phba->sli4_hba.max_cfg_param.vpi_base,
5790 phba->sli4_hba.max_cfg_param.max_vpi,
5791 phba->sli4_hba.max_cfg_param.vfi_base,
5792 phba->sli4_hba.max_cfg_param.max_vfi,
5793 phba->sli4_hba.max_cfg_param.rpi_base,
5794 phba->sli4_hba.max_cfg_param.max_rpi,
5795 phba->sli4_hba.max_cfg_param.fcfi_base,
5796 phba->sli4_hba.max_cfg_param.max_fcfi);
5797 }
5798 mempool_free(pmb, phba->mbox_mem_pool);
5799
5800 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
5801 if (phba->cfg_hba_queue_depth >
5802 (phba->sli4_hba.max_cfg_param.max_xri -
5803 lpfc_sli4_get_els_iocb_cnt(phba)))
5804 phba->cfg_hba_queue_depth =
5805 phba->sli4_hba.max_cfg_param.max_xri -
5806 lpfc_sli4_get_els_iocb_cnt(phba);
5807 return rc;
5808 }
5809
5810 /**
5811 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
5812 * @phba: pointer to lpfc hba data structure.
5813 *
5814 * This routine is invoked to setup the port-side endian order when
5815 * the port if_type is 0. This routine has no function for other
5816 * if_types.
5817 *
5818 * Return codes
5819 * 0 - successful
5820 * -ENOMEM - No available memory
5821 * -EIO - The mailbox failed to complete successfully.
5822 **/
5823 static int
5824 lpfc_setup_endian_order(struct lpfc_hba *phba)
5825 {
5826 LPFC_MBOXQ_t *mboxq;
5827 uint32_t if_type, rc = 0;
5828 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
5829 HOST_ENDIAN_HIGH_WORD1};
5830
5831 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
5832 switch (if_type) {
5833 case LPFC_SLI_INTF_IF_TYPE_0:
5834 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
5835 GFP_KERNEL);
5836 if (!mboxq) {
5837 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5838 "0492 Unable to allocate memory for "
5839 "issuing SLI_CONFIG_SPECIAL mailbox "
5840 "command\n");
5841 return -ENOMEM;
5842 }
5843
5844 /*
5845 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
5846 * two words to contain special data values and no other data.
5847 */
5848 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
5849 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
5850 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5851 if (rc != MBX_SUCCESS) {
5852 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5853 "0493 SLI_CONFIG_SPECIAL mailbox "
5854 "failed with status x%x\n",
5855 rc);
5856 rc = -EIO;
5857 }
5858 mempool_free(mboxq, phba->mbox_mem_pool);
5859 break;
5860 case LPFC_SLI_INTF_IF_TYPE_2:
5861 case LPFC_SLI_INTF_IF_TYPE_1:
5862 default:
5863 break;
5864 }
5865 return rc;
5866 }
5867
5868 /**
5869 * lpfc_sli4_queue_create - Create all the SLI4 queues
5870 * @phba: pointer to lpfc hba data structure.
5871 *
5872 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
5873 * operation. For each SLI4 queue type, the parameters such as queue entry
5874 * count (queue depth) shall be taken from the module parameter. For now,
5875 * we just use some constant number as placeholder.
5876 *
5877 * Return codes
5878 * 0 - successful
5879 * -ENOMEM - No available memory
5880 * -EIO - The mailbox failed to complete successfully.
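 *
 * Topology sketch of what is allocated below (and later wired up in
 * lpfc_sli4_queue_setup()): one slow-path EQ feeding the mailbox and ELS
 * CQs, cfg_fcp_eq_count fast-path EQs each paired one-to-one with an FCP
 * CQ, plus the MQ, the ELS WQ, cfg_fcp_wq_count FCP WQs and the
 * header/data RQ pair.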
5881 **/
5882 static int
5883 lpfc_sli4_queue_create(struct lpfc_hba *phba)
5884 {
5885 struct lpfc_queue *qdesc;
5886 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5887 int cfg_fcp_wq_count;
5888 int cfg_fcp_eq_count;
5889
5890 /*
5891 * Sanity check for configured queue parameters against the run-time
5892 * device parameters
5893 */
5894
5895 /* Sanity check on FCP fast-path WQ parameters */
5896 cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
5897 if (cfg_fcp_wq_count >
5898 (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
5899 cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
5900 LPFC_SP_WQN_DEF;
5901 if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
5902 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5903 "2581 Not enough WQs (%d) from "
5904 "the pci function for supporting "
5905 "FCP WQs (%d)\n",
5906 phba->sli4_hba.max_cfg_param.max_wq,
5907 phba->cfg_fcp_wq_count);
5908 goto out_error;
5909 }
5910 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5911 "2582 Not enough WQs (%d) from the pci "
5912 "function for supporting the requested "
5913 "FCP WQs (%d), the actual FCP WQs can "
5914 "be supported: %d\n",
5915 phba->sli4_hba.max_cfg_param.max_wq,
5916 phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
5917 }
5918 /* The actual number of FCP work queues adopted */
5919 phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
5920
5921 /* Sanity check on FCP fast-path EQ parameters */
5922 cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
5923 if (cfg_fcp_eq_count >
5924 (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
5925 cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
5926 LPFC_SP_EQN_DEF;
5927 if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
5928 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5929 "2574 Not enough EQs (%d) from the "
5930 "pci function for supporting FCP "
5931 "EQs (%d)\n",
5932 phba->sli4_hba.max_cfg_param.max_eq,
5933 phba->cfg_fcp_eq_count);
5934 goto out_error;
5935 }
5936 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5937 "2575 Not enough EQs (%d) from the pci "
5938 "function for supporting the requested "
5939 "FCP EQs (%d), the actual FCP EQs can "
5940 "be supported: %d\n",
5941 phba->sli4_hba.max_cfg_param.max_eq,
5942 phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
5943 }
5944 /* It does not make sense to have more EQs than WQs */
5945 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
5946 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5947 "2593 The FCP EQ count(%d) cannot be greater "
5948 "than the FCP WQ count(%d), limiting the "
5949 "FCP EQ count to %d\n", cfg_fcp_eq_count,
5950 phba->cfg_fcp_wq_count,
5951 phba->cfg_fcp_wq_count);
5952 cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
5953 }
5954 /* The actual number of FCP event queues adopted */
5955 phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
5956 /* The overall number of event queues used */
5957 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
5958
5959 /*
5960 * Create Event Queues (EQs)
5961 */
5962
5963 /* Get EQ depth from module parameter, fake the default for now */
5964 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
5965 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
5966
5967 /* Create slow path event queue */
5968 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
5969 phba->sli4_hba.eq_ecount);
5970 if (!qdesc) {
5971 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5972 "0496 Failed allocate slow-path EQ\n");
5973 goto out_error;
5974 }
5975 phba->sli4_hba.sp_eq = qdesc;
5976
5977 /* Create fast-path FCP Event Queue(s) */
5978 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
5979
phba->cfg_fcp_eq_count), GFP_KERNEL); 5980 if (!phba->sli4_hba.fp_eq) { 5981 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5982 "2576 Failed allocate memory for fast-path " 5983 "EQ record array\n"); 5984 goto out_free_sp_eq; 5985 } 5986 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 5987 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 5988 phba->sli4_hba.eq_ecount); 5989 if (!qdesc) { 5990 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5991 "0497 Failed allocate fast-path EQ\n"); 5992 goto out_free_fp_eq; 5993 } 5994 phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc; 5995 } 5996 5997 /* 5998 * Create Complete Queues (CQs) 5999 */ 6000 6001 /* Get CQ depth from module parameter, fake the default for now */ 6002 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 6003 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 6004 6005 /* Create slow-path Mailbox Command Complete Queue */ 6006 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 6007 phba->sli4_hba.cq_ecount); 6008 if (!qdesc) { 6009 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6010 "0500 Failed allocate slow-path mailbox CQ\n"); 6011 goto out_free_fp_eq; 6012 } 6013 phba->sli4_hba.mbx_cq = qdesc; 6014 6015 /* Create slow-path ELS Complete Queue */ 6016 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 6017 phba->sli4_hba.cq_ecount); 6018 if (!qdesc) { 6019 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6020 "0501 Failed allocate slow-path ELS CQ\n"); 6021 goto out_free_mbx_cq; 6022 } 6023 phba->sli4_hba.els_cq = qdesc; 6024 6025 6026 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */ 6027 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * 6028 phba->cfg_fcp_eq_count), GFP_KERNEL); 6029 if (!phba->sli4_hba.fcp_cq) { 6030 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6031 "2577 Failed allocate memory for fast-path " 6032 "CQ record array\n"); 6033 goto out_free_els_cq; 6034 } 6035 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 6036 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 6037 phba->sli4_hba.cq_ecount); 6038 if (!qdesc) { 6039 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6040 "0499 Failed allocate fast-path FCP " 6041 "CQ (%d)\n", fcp_cqidx); 6042 goto out_free_fcp_cq; 6043 } 6044 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc; 6045 } 6046 6047 /* Create Mailbox Command Queue */ 6048 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 6049 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 6050 6051 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize, 6052 phba->sli4_hba.mq_ecount); 6053 if (!qdesc) { 6054 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6055 "0505 Failed allocate slow-path MQ\n"); 6056 goto out_free_fcp_cq; 6057 } 6058 phba->sli4_hba.mbx_wq = qdesc; 6059 6060 /* 6061 * Create all the Work Queues (WQs) 6062 */ 6063 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 6064 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 6065 6066 /* Create slow-path ELS Work Queue */ 6067 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 6068 phba->sli4_hba.wq_ecount); 6069 if (!qdesc) { 6070 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6071 "0504 Failed allocate slow-path ELS WQ\n"); 6072 goto out_free_mbx_wq; 6073 } 6074 phba->sli4_hba.els_wq = qdesc; 6075 6076 /* Create fast-path FCP Work Queue(s) */ 6077 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) * 6078 phba->cfg_fcp_wq_count), GFP_KERNEL); 6079 if (!phba->sli4_hba.fcp_wq) { 6080 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6081 "2578 Failed allocate memory for fast-path " 6082 "WQ record array\n"); 6083 goto 
out_free_els_wq;
6084 }
6085 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
6086 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
6087 phba->sli4_hba.wq_ecount);
6088 if (!qdesc) {
6089 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6090 "0503 Failed allocate fast-path FCP "
6091 "WQ (%d)\n", fcp_wqidx);
6092 goto out_free_fcp_wq;
6093 }
6094 phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
6095 }
6096
6097 /*
6098 * Create Receive Queue (RQ)
6099 */
6100 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
6101 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
6102
6103 /* Create Receive Queue for header */
6104 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
6105 phba->sli4_hba.rq_ecount);
6106 if (!qdesc) {
6107 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6108 "0506 Failed allocate receive HRQ\n");
6109 goto out_free_fcp_wq;
6110 }
6111 phba->sli4_hba.hdr_rq = qdesc;
6112
6113 /* Create Receive Queue for data */
6114 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
6115 phba->sli4_hba.rq_ecount);
6116 if (!qdesc) {
6117 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6118 "0507 Failed allocate receive DRQ\n");
6119 goto out_free_hdr_rq;
6120 }
6121 phba->sli4_hba.dat_rq = qdesc;
6122
6123 return 0;
6124
6125 out_free_hdr_rq:
6126 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
6127 phba->sli4_hba.hdr_rq = NULL;
6128 out_free_fcp_wq:
6129 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
6130 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
6131 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
6132 }
6133 kfree(phba->sli4_hba.fcp_wq);
6134 out_free_els_wq:
6135 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
6136 phba->sli4_hba.els_wq = NULL;
6137 out_free_mbx_wq:
6138 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
6139 phba->sli4_hba.mbx_wq = NULL;
6140 out_free_fcp_cq:
6141 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
6142 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
6143 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
6144 }
6145 kfree(phba->sli4_hba.fcp_cq);
6146 out_free_els_cq:
6147 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
6148 phba->sli4_hba.els_cq = NULL;
6149 out_free_mbx_cq:
6150 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
6151 phba->sli4_hba.mbx_cq = NULL;
6152 out_free_fp_eq:
6153 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
6154 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
6155 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
6156 }
6157 kfree(phba->sli4_hba.fp_eq);
6158 out_free_sp_eq:
6159 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
6160 phba->sli4_hba.sp_eq = NULL;
6161 out_error:
6162 return -ENOMEM;
6163 }
6164
6165 /**
6166 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
6167 * @phba: pointer to lpfc hba data structure.
6168 *
6169 * This routine is invoked to release all the SLI4 queues allocated for the
6170 * FCoE HBA operation.
6176 **/
6177 static void
6178 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
6179 {
6180 int fcp_qidx;
6181
6182 /* Release mailbox command work queue */
6183 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
6184 phba->sli4_hba.mbx_wq = NULL;
6185
6186 /* Release ELS work queue */
6187 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
6188 phba->sli4_hba.els_wq = NULL;
6189
6190 /* Release FCP work queue */
6191 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
6192 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
6193 kfree(phba->sli4_hba.fcp_wq);
6194 phba->sli4_hba.fcp_wq = NULL;
6195
6196 /* Release unsolicited receive queue */
6197 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
6198 phba->sli4_hba.hdr_rq = NULL;
6199 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
6200 phba->sli4_hba.dat_rq = NULL;
6201
6202 /* Release ELS complete queue */
6203 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
6204 phba->sli4_hba.els_cq = NULL;
6205
6206 /* Release mailbox command complete queue */
6207 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
6208 phba->sli4_hba.mbx_cq = NULL;
6209
6210 /* Release FCP response complete queue */
6211 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6212 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
6213 kfree(phba->sli4_hba.fcp_cq);
6214 phba->sli4_hba.fcp_cq = NULL;
6215
6216 /* Release fast-path event queue */
6217 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6218 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
6219 kfree(phba->sli4_hba.fp_eq);
6220 phba->sli4_hba.fp_eq = NULL;
6221
6222 /* Release slow-path event queue */
6223 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
6224 phba->sli4_hba.sp_eq = NULL;
6225
6226 return;
6227 }
6228
6229 /**
6230 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
6231 * @phba: pointer to lpfc hba data structure.
6232 *
6233 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
6234 * operation.
6235 *
6236 * Return codes
6237 * 0 - successful
6238 * -ENOMEM - No available memory
6239 * -EIO - The mailbox failed to complete successfully.
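 *
 * The setup order below is deliberate: EQs first, then the CQs that
 * attach to them, then the MQ/WQs/RQ that attach to the CQs, since each
 * queue-create mailbox command must name an already-created parent queue.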
6240 **/ 6241 int 6242 lpfc_sli4_queue_setup(struct lpfc_hba *phba) 6243 { 6244 int rc = -ENOMEM; 6245 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 6246 int fcp_cq_index = 0; 6247 6248 /* 6249 * Set up Event Queues (EQs) 6250 */ 6251 6252 /* Set up slow-path event queue */ 6253 if (!phba->sli4_hba.sp_eq) { 6254 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6255 "0520 Slow-path EQ not allocated\n"); 6256 goto out_error; 6257 } 6258 rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq, 6259 LPFC_SP_DEF_IMAX); 6260 if (rc) { 6261 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6262 "0521 Failed setup of slow-path EQ: " 6263 "rc = 0x%x\n", rc); 6264 goto out_error; 6265 } 6266 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6267 "2583 Slow-path EQ setup: queue-id=%d\n", 6268 phba->sli4_hba.sp_eq->queue_id); 6269 6270 /* Set up fast-path event queue */ 6271 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 6272 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) { 6273 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6274 "0522 Fast-path EQ (%d) not " 6275 "allocated\n", fcp_eqidx); 6276 goto out_destroy_fp_eq; 6277 } 6278 rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx], 6279 phba->cfg_fcp_imax); 6280 if (rc) { 6281 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6282 "0523 Failed setup of fast-path EQ " 6283 "(%d), rc = 0x%x\n", fcp_eqidx, rc); 6284 goto out_destroy_fp_eq; 6285 } 6286 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6287 "2584 Fast-path EQ setup: " 6288 "queue[%d]-id=%d\n", fcp_eqidx, 6289 phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id); 6290 } 6291 6292 /* 6293 * Set up Complete Queues (CQs) 6294 */ 6295 6296 /* Set up slow-path MBOX Complete Queue as the first CQ */ 6297 if (!phba->sli4_hba.mbx_cq) { 6298 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6299 "0528 Mailbox CQ not allocated\n"); 6300 goto out_destroy_fp_eq; 6301 } 6302 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq, 6303 LPFC_MCQ, LPFC_MBOX); 6304 if (rc) { 6305 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6306 "0529 Failed setup of slow-path mailbox CQ: " 6307 "rc = 0x%x\n", rc); 6308 goto out_destroy_fp_eq; 6309 } 6310 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6311 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", 6312 phba->sli4_hba.mbx_cq->queue_id, 6313 phba->sli4_hba.sp_eq->queue_id); 6314 6315 /* Set up slow-path ELS Complete Queue */ 6316 if (!phba->sli4_hba.els_cq) { 6317 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6318 "0530 ELS CQ not allocated\n"); 6319 goto out_destroy_mbx_cq; 6320 } 6321 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq, 6322 LPFC_WCQ, LPFC_ELS); 6323 if (rc) { 6324 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6325 "0531 Failed setup of slow-path ELS CQ: " 6326 "rc = 0x%x\n", rc); 6327 goto out_destroy_mbx_cq; 6328 } 6329 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6330 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", 6331 phba->sli4_hba.els_cq->queue_id, 6332 phba->sli4_hba.sp_eq->queue_id); 6333 6334 /* Set up fast-path FCP Response Complete Queue */ 6335 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 6336 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 6337 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6338 "0526 Fast-path FCP CQ (%d) not " 6339 "allocated\n", fcp_cqidx); 6340 goto out_destroy_fcp_cq; 6341 } 6342 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], 6343 phba->sli4_hba.fp_eq[fcp_cqidx], 6344 LPFC_WCQ, LPFC_FCP); 6345 if (rc) { 6346 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6347 "0527 Failed setup of fast-path FCP " 6348 "CQ (%d), rc = 0x%x\n", 
fcp_cqidx, rc); 6349 goto out_destroy_fcp_cq; 6350 } 6351 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6352 "2588 FCP CQ setup: cq[%d]-id=%d, " 6353 "parent eq[%d]-id=%d\n", 6354 fcp_cqidx, 6355 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, 6356 fcp_cqidx, 6357 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id); 6358 } 6359 6360 /* 6361 * Set up all the Work Queues (WQs) 6362 */ 6363 6364 /* Set up Mailbox Command Queue */ 6365 if (!phba->sli4_hba.mbx_wq) { 6366 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6367 "0538 Slow-path MQ not allocated\n"); 6368 goto out_destroy_fcp_cq; 6369 } 6370 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, 6371 phba->sli4_hba.mbx_cq, LPFC_MBOX); 6372 if (rc) { 6373 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6374 "0539 Failed setup of slow-path MQ: " 6375 "rc = 0x%x\n", rc); 6376 goto out_destroy_fcp_cq; 6377 } 6378 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6379 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 6380 phba->sli4_hba.mbx_wq->queue_id, 6381 phba->sli4_hba.mbx_cq->queue_id); 6382 6383 /* Set up slow-path ELS Work Queue */ 6384 if (!phba->sli4_hba.els_wq) { 6385 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6386 "0536 Slow-path ELS WQ not allocated\n"); 6387 goto out_destroy_mbx_wq; 6388 } 6389 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq, 6390 phba->sli4_hba.els_cq, LPFC_ELS); 6391 if (rc) { 6392 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6393 "0537 Failed setup of slow-path ELS WQ: " 6394 "rc = 0x%x\n", rc); 6395 goto out_destroy_mbx_wq; 6396 } 6397 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6398 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 6399 phba->sli4_hba.els_wq->queue_id, 6400 phba->sli4_hba.els_cq->queue_id); 6401 6402 /* Set up fast-path FCP Work Queue */ 6403 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { 6404 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { 6405 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6406 "0534 Fast-path FCP WQ (%d) not " 6407 "allocated\n", fcp_wqidx); 6408 goto out_destroy_fcp_wq; 6409 } 6410 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], 6411 phba->sli4_hba.fcp_cq[fcp_cq_index], 6412 LPFC_FCP); 6413 if (rc) { 6414 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6415 "0535 Failed setup of fast-path FCP " 6416 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc); 6417 goto out_destroy_fcp_wq; 6418 } 6419 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6420 "2591 FCP WQ setup: wq[%d]-id=%d, " 6421 "parent cq[%d]-id=%d\n", 6422 fcp_wqidx, 6423 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, 6424 fcp_cq_index, 6425 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id); 6426 /* Round robin FCP Work Queue's Completion Queue assignment */ 6427 fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count); 6428 } 6429 6430 /* 6431 * Create Receive Queue (RQ) 6432 */ 6433 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 6434 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6435 "0540 Receive Queue not allocated\n"); 6436 goto out_destroy_fcp_wq; 6437 } 6438 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 6439 phba->sli4_hba.els_cq, LPFC_USOL); 6440 if (rc) { 6441 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6442 "0541 Failed setup of Receive Queue: " 6443 "rc = 0x%x\n", rc); 6444 goto out_destroy_fcp_wq; 6445 } 6446 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6447 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 6448 "parent cq-id=%d\n", 6449 phba->sli4_hba.hdr_rq->queue_id, 6450 phba->sli4_hba.dat_rq->queue_id, 6451 phba->sli4_hba.els_cq->queue_id); 6452 return 0; 6453 6454 out_destroy_fcp_wq: 6455 for (--fcp_wqidx; 
fcp_wqidx >= 0; fcp_wqidx--)
6456 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
6457 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6458 out_destroy_mbx_wq:
6459 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6460 out_destroy_fcp_cq:
6461 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
6462 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
6463 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6464 out_destroy_mbx_cq:
6465 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6466 out_destroy_fp_eq:
6467 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
6468 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
6469 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6470 out_error:
6471 return rc;
6472 }
6473
6474 /**
6475 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
6476 * @phba: pointer to lpfc hba data structure.
6477 *
6478 * This routine is invoked to unset all the SLI4 queues set up for the FCoE
6479 * HBA operation.
6485 **/
6486 void
6487 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
6488 {
6489 int fcp_qidx;
6490
6491 /* Unset mailbox command work queue */
6492 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6493 /* Unset ELS work queue */
6494 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6495 /* Unset unsolicited receive queue */
6496 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
6497 /* Unset FCP work queue */
6498 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
6499 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
6500 /* Unset mailbox command complete queue */
6501 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6502 /* Unset ELS complete queue */
6503 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6504 /* Unset FCP response complete queue */
6505 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6506 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
6507 /* Unset fast-path event queue */
6508 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6509 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
6510 /* Unset slow-path event queue */
6511 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6512 }
6513
6514 /**
6515 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
6516 * @phba: pointer to lpfc hba data structure.
6517 *
6518 * This routine is invoked to allocate and set up a pool of completion queue
6519 * events. The body of the completion queue event is a completion queue entry
6520 * (CQE). For now, this pool is used for the interrupt service routine to queue
6521 * the following HBA completion queue events for the worker thread to process:
6522 * - Mailbox asynchronous events
6523 * - Receive queue completion unsolicited events
6524 * Later, this can be used for all the slow-path events.
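 *
 * The pool is sized at four events per CQ entry (4 * cq_ecount in the
 * loop below), presumably to give the ISR headroom while the worker
 * thread drains previously queued events.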
6525 *
6526 * Return codes
6527 * 0 - successful
6528 * -ENOMEM - No available memory
6529 **/
6530 static int
6531 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
6532 {
6533 struct lpfc_cq_event *cq_event;
6534 int i;
6535
6536 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
6537 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
6538 if (!cq_event)
6539 goto out_pool_create_fail;
6540 list_add_tail(&cq_event->list,
6541 &phba->sli4_hba.sp_cqe_event_pool);
6542 }
6543 return 0;
6544
6545 out_pool_create_fail:
6546 lpfc_sli4_cq_event_pool_destroy(phba);
6547 return -ENOMEM;
6548 }
6549
6550 /**
6551 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
6552 * @phba: pointer to lpfc hba data structure.
6553 *
6554 * This routine is invoked to free the pool of completion queue events at
6555 * driver unload time. Note that it is the responsibility of the driver
6556 * cleanup routine to free all the outstanding completion-queue events
6557 * allocated from this pool back into the pool before invoking this routine
6558 * to destroy the pool.
6559 **/
6560 static void
6561 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
6562 {
6563 struct lpfc_cq_event *cq_event, *next_cq_event;
6564
6565 list_for_each_entry_safe(cq_event, next_cq_event,
6566 &phba->sli4_hba.sp_cqe_event_pool, list) {
6567 list_del(&cq_event->list);
6568 kfree(cq_event);
6569 }
6570 }
6571
6572 /**
6573 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6574 * @phba: pointer to lpfc hba data structure.
6575 *
6576 * This routine is the lock-free version of the API invoked to allocate a
6577 * completion-queue event from the free pool.
6578 *
6579 * Return: Pointer to the newly allocated completion-queue event if successful
6580 * NULL otherwise.
6581 **/
6582 struct lpfc_cq_event *
6583 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6584 {
6585 struct lpfc_cq_event *cq_event = NULL;
6586
6587 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
6588 struct lpfc_cq_event, list);
6589 return cq_event;
6590 }
6591
6592 /**
6593 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6594 * @phba: pointer to lpfc hba data structure.
6595 *
6596 * This routine is the locking version of the API invoked to allocate a
6597 * completion-queue event from the free pool.
6598 *
6599 * Return: Pointer to the newly allocated completion-queue event if successful
6600 * NULL otherwise.
6601 **/
6602 struct lpfc_cq_event *
6603 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6604 {
6605 struct lpfc_cq_event *cq_event;
6606 unsigned long iflags;
6607
6608 spin_lock_irqsave(&phba->hbalock, iflags);
6609 cq_event = __lpfc_sli4_cq_event_alloc(phba);
6610 spin_unlock_irqrestore(&phba->hbalock, iflags);
6611 return cq_event;
6612 }
6613
6614 /**
6615 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6616 * @phba: pointer to lpfc hba data structure.
6617 * @cq_event: pointer to the completion queue event to be freed.
6618 *
6619 * This routine is the lock-free version of the API invoked to release a
6620 * completion-queue event back into the free pool.
6621 **/
6622 void
6623 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6624 struct lpfc_cq_event *cq_event)
6625 {
6626 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
6627 }
6628
6629 /**
6630 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6631 * @phba: pointer to lpfc hba data structure.
* @cq_event: pointer to the completion queue event to be freed.
6633 *
6634 * This routine is the locking version of the API invoked to release a
6635 * completion-queue event back into the free pool.
6636 **/
6637 void
6638 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6639 struct lpfc_cq_event *cq_event)
6640 {
6641 unsigned long iflags;
6642 spin_lock_irqsave(&phba->hbalock, iflags);
6643 __lpfc_sli4_cq_event_release(phba, cq_event);
6644 spin_unlock_irqrestore(&phba->hbalock, iflags);
6645 }
6646
6647 /**
6648 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
6649 * @phba: pointer to lpfc hba data structure.
6650 *
6651 * This routine is invoked to free all the pending completion-queue events
6652 * back into the free pool for device reset.
6653 **/
6654 static void
6655 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
6656 {
6657 LIST_HEAD(cqelist);
6658 struct lpfc_cq_event *cqe;
6659 unsigned long iflags;
6660
6661 /* Retrieve all the pending WCQEs from pending WCQE lists */
6662 spin_lock_irqsave(&phba->hbalock, iflags);
6663 /* Pending FCP XRI abort events */
6664 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
6665 &cqelist);
6666 /* Pending ELS XRI abort events */
6667 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
6668 &cqelist);
6669 /* Pending async events */
6670 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
6671 &cqelist);
6672 spin_unlock_irqrestore(&phba->hbalock, iflags);
6673
6674 while (!list_empty(&cqelist)) {
6675 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
6676 lpfc_sli4_cq_event_release(phba, cqe);
6677 }
6678 }
6679
6680 /**
6681 * lpfc_pci_function_reset - Reset pci function.
6682 * @phba: pointer to lpfc hba data structure.
6683 *
6684 * This routine is invoked to request a PCI function reset. It destroys
6685 * all resources assigned to the PCI function which originates this request.
6686 *
6687 * Return codes
6688 * 0 - successful
6689 * -ENOMEM - No available memory
6690 * -EIO - The mailbox failed to complete successfully.
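 *
 * The mechanism differs per interface type: for if_type 0 the reset is
 * requested with a FUNCTION_RESET mailbox command, while for if_type 2
 * it is driven through the SLIPORT control register, polling the status
 * register for RDY (or RN, which triggers another reset attempt), as
 * implemented below.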
6691 **/
6692 int
6693 lpfc_pci_function_reset(struct lpfc_hba *phba)
6694 {
6695 LPFC_MBOXQ_t *mboxq;
6696 uint32_t rc = 0, if_type;
6697 uint32_t shdr_status, shdr_add_status;
6698 uint32_t rdy_chk, num_resets = 0, reset_again = 0;
6699 union lpfc_sli4_cfg_shdr *shdr;
6700 struct lpfc_register reg_data;
6701
6702 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
6703 switch (if_type) {
6704 case LPFC_SLI_INTF_IF_TYPE_0:
6705 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
6706 GFP_KERNEL);
6707 if (!mboxq) {
6708 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6709 "0494 Unable to allocate memory for "
6710 "issuing SLI_FUNCTION_RESET mailbox "
6711 "command\n");
6712 return -ENOMEM;
6713 }
6714
6715 /* Setup PCI function reset mailbox-ioctl command */
6716 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6717 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
6718 LPFC_SLI4_MBX_EMBED);
6719 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6720 shdr = (union lpfc_sli4_cfg_shdr *)
6721 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
6722 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6723 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
6724 &shdr->response);
6725 if (rc != MBX_TIMEOUT)
6726 mempool_free(mboxq, phba->mbox_mem_pool);
6727 if (shdr_status || shdr_add_status || rc) {
6728 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6729 "0495 SLI_FUNCTION_RESET mailbox "
6730 "failed with status x%x add_status x%x,"
6731 " mbx status x%x\n",
6732 shdr_status, shdr_add_status, rc);
6733 rc = -ENXIO;
6734 }
6735 break;
6736 case LPFC_SLI_INTF_IF_TYPE_2:
6737 for (num_resets = 0;
6738 num_resets < MAX_IF_TYPE_2_RESETS;
6739 num_resets++) {
6740 reg_data.word0 = 0;
6741 bf_set(lpfc_sliport_ctrl_end, &reg_data,
6742 LPFC_SLIPORT_LITTLE_ENDIAN);
6743 bf_set(lpfc_sliport_ctrl_ip, &reg_data,
6744 LPFC_SLIPORT_INIT_PORT);
6745 writel(reg_data.word0, phba->sli4_hba.u.if_type2.
6746 CTRLregaddr);
6747
6748 /*
6749 * Poll the Port Status Register and wait for RDY for
6750 * up to 10 seconds. If the port doesn't respond, treat
6751 * it as an error. If the port responds with RN, start
6752 * the loop again.
6753 */
6754 for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
6755 reg_data.word0 =
6756 readl(phba->sli4_hba.u.if_type2.
6757 STATUSregaddr);
6758 if (bf_get(lpfc_sliport_status_rdy, &reg_data))
6759 break;
6760 if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
6761 reset_again++;
6762 break;
6763 }
6764 msleep(10);
6765 }
6766
6767 /*
6768 * If the port responds to the init request with
6769 * reset needed, delay for a bit and restart the loop.
6770 */
6771 if (reset_again) {
6772 msleep(10);
6773 reset_again = 0;
6774 continue;
6775 }
6776
6777 /* Detect any port errors. */
6778 reg_data.word0 = readl(phba->sli4_hba.u.if_type2.
6779 STATUSregaddr);
6780 if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
6781 (rdy_chk >= 1000)) {
6782 phba->work_status[0] = readl(
6783 phba->sli4_hba.u.if_type2.ERR1regaddr);
6784 phba->work_status[1] = readl(
6785 phba->sli4_hba.u.if_type2.ERR2regaddr);
6786 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6787 "2890 Port Error Detected "
6788 "during Port Reset: "
6789 "port status reg 0x%x, "
6790 "error 1=0x%x, error 2=0x%x\n",
6791 reg_data.word0,
6792 phba->work_status[0],
6793 phba->work_status[1]);
6794 rc = -ENODEV;
6795 }
6796
6797 /*
6798 * Terminate the outer loop provided the Port indicated
6799 * ready within 10 seconds.
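 * (rdy_chk counts 10 ms polls, so the 1000-poll limit is the 10 second
 * budget; MAX_IF_TYPE_2_RESETS bounds how often an RN response may
 * restart the sequence.)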
6800 */
6801 if (rdy_chk < 1000)
6802 break;
6803 }
6804 break;
6805 case LPFC_SLI_INTF_IF_TYPE_1:
6806 default:
6807 break;
6808 }
6809
6810 /* Catch the not-ready port failure after a port reset. */
6811 if (num_resets >= MAX_IF_TYPE_2_RESETS)
6812 rc = -ENODEV;
6813
6814 return rc;
6815 }
6816
6817 /**
6818 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
6819 * @phba: pointer to lpfc hba data structure.
6820 * @cnt: number of nop mailbox commands to send.
6821 *
6822 * This routine is invoked to send a number @cnt of NOP mailbox commands
6823 * and wait for each command to complete.
6824 *
6825 * Return: the number of NOP mailbox commands completed.
6826 **/
6827 static int
6828 lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
6829 {
6830 LPFC_MBOXQ_t *mboxq;
6831 int length, cmdsent;
6832 uint32_t mbox_tmo;
6833 uint32_t rc = 0;
6834 uint32_t shdr_status, shdr_add_status;
6835 union lpfc_sli4_cfg_shdr *shdr;
6836
6837 if (cnt == 0) {
6838 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6839 "2518 Requested to send 0 NOP mailbox cmd\n");
6840 return cnt;
6841 }
6842
6843 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6844 if (!mboxq) {
6845 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6846 "2519 Unable to allocate memory for issuing "
6847 "NOP mailbox command\n");
6848 return 0;
6849 }
6850
6851 /* Set up NOP SLI4_CONFIG mailbox-ioctl command */
6852 length = (sizeof(struct lpfc_mbx_nop) -
6853 sizeof(struct lpfc_sli4_cfg_mhdr));
6854 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6855 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
6856
6857 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
6858 for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
6859 if (!phba->sli4_hba.intr_enable)
6860 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6861 else
6862 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
6863 if (rc == MBX_TIMEOUT)
6864 break;
6865 /* Check return status */
6866 shdr = (union lpfc_sli4_cfg_shdr *)
6867 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
6868 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6869 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
6870 &shdr->response);
6871 if (shdr_status || shdr_add_status || rc) {
6872 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6873 "2520 NOP mailbox command failed "
6874 "status x%x add_status x%x mbx "
6875 "status x%x\n", shdr_status,
6876 shdr_add_status, rc);
6877 break;
6878 }
6879 }
6880
6881 if (rc != MBX_TIMEOUT)
6882 mempool_free(mboxq, phba->mbox_mem_pool);
6883
6884 return cmdsent;
6885 }
6886
6887 /**
6888 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
6889 * @phba: pointer to lpfc hba data structure.
6890 *
6891 * This routine is invoked to set up the PCI device memory space for device
6892 * with SLI-4 interface spec.
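 *
 * BAR usage as mapped below: PCI resource 0 (pci_bar0_map) holds the
 * SLI4 config space registers; resources 2 and 4 (pci_bar1_map and
 * pci_bar2_map) hold the if_type 0 control/status and doorbell
 * registers. For if_type 2 devices the doorbells live in the config
 * space BAR instead (see lpfc_sli4_bar0_register_memmap()).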
*
6894 * Return codes
6895 * 0 - successful
6896 * other values - error
6897 **/
6898 static int
6899 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
6900 {
6901 struct pci_dev *pdev;
6902 unsigned long bar0map_len, bar1map_len, bar2map_len;
6903 int error = -ENODEV;
6904 uint32_t if_type;
6905
6906 /* Obtain PCI device reference */
6907 if (!phba->pcidev)
6908 return error;
6909 else
6910 pdev = phba->pcidev;
6911
6912 /* Set the device DMA mask size */
6913 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
6914 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
6915 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
6916 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
6917 return error;
6918 }
6919 }
6920
6921 /*
6922 * The BARs and register set definitions and offset locations are
6923 * dependent on the if_type.
6924 */
6925 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
6926 &phba->sli4_hba.sli_intf.word0)) {
6927 return error;
6928 }
6929
6930 /* There is no SLI3 failback for SLI4 devices. */
6931 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
6932 LPFC_SLI_INTF_VALID) {
6933 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6934 "2894 SLI_INTF reg contents invalid "
6935 "sli_intf reg 0x%x\n",
6936 phba->sli4_hba.sli_intf.word0);
6937 return error;
6938 }
6939
6940 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
6941 /*
6942 * Get the bus address of the SLI4 device BAR regions and the
6943 * number of bytes required by each mapping. The mapping of the
6944 * particular PCI BAR regions is dependent on the type of
6945 * SLI4 device.
6946 */
6947 if (pci_resource_start(pdev, 0)) {
6948 phba->pci_bar0_map = pci_resource_start(pdev, 0);
6949 bar0map_len = pci_resource_len(pdev, 0);
6950
6951 /*
6952 * Map SLI4 PCI Config Space Register base to a kernel virtual
6953 * addr
6954 */
6955 phba->sli4_hba.conf_regs_memmap_p =
6956 ioremap(phba->pci_bar0_map, bar0map_len);
6957 if (!phba->sli4_hba.conf_regs_memmap_p) {
6958 dev_printk(KERN_ERR, &pdev->dev,
6959 "ioremap failed for SLI4 PCI config "
6960 "registers.\n");
6961 goto out;
6962 }
6963 /* Set up BAR0 PCI config space register memory map */
6964 lpfc_sli4_bar0_register_memmap(phba, if_type);
6965 } else {
6966 phba->pci_bar0_map = pci_resource_start(pdev, 1);
6967 bar0map_len = pci_resource_len(pdev, 1);
6968 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
6969 dev_printk(KERN_ERR, &pdev->dev,
6970 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
6971 goto out;
6972 }
6973 phba->sli4_hba.conf_regs_memmap_p =
6974 ioremap(phba->pci_bar0_map, bar0map_len);
6975 if (!phba->sli4_hba.conf_regs_memmap_p) {
6976 dev_printk(KERN_ERR, &pdev->dev,
6977 "ioremap failed for SLI4 PCI config "
6978 "registers.\n");
6979 goto out;
6980 }
6981 lpfc_sli4_bar0_register_memmap(phba, if_type);
6982 }
6983
6984 if (pci_resource_start(pdev, 2)) {
6985 /*
6986 * Map SLI4 if type 0 HBA Control Register base to a kernel
6987 * virtual address and setup the registers.
6988 */
6989 phba->pci_bar1_map = pci_resource_start(pdev, 2);
6990 bar1map_len = pci_resource_len(pdev, 2);
6991 phba->sli4_hba.ctrl_regs_memmap_p =
6992 ioremap(phba->pci_bar1_map, bar1map_len);
6993 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
6994 dev_printk(KERN_ERR, &pdev->dev,
6995 "ioremap failed for SLI4 HBA control registers.\n");
6996 goto out_iounmap_conf;
6997 }
6998 lpfc_sli4_bar1_register_memmap(phba);
6999 }
7000
7001 if (pci_resource_start(pdev, 4)) {
7002 /*
7003 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
7004 * virtual address and setup the registers.
7005 */
7006 phba->pci_bar2_map = pci_resource_start(pdev, 4);
7007 bar2map_len = pci_resource_len(pdev, 4);
7008 phba->sli4_hba.drbl_regs_memmap_p =
7009 ioremap(phba->pci_bar2_map, bar2map_len);
7010 if (!phba->sli4_hba.drbl_regs_memmap_p) {
7011 dev_printk(KERN_ERR, &pdev->dev,
7012 "ioremap failed for SLI4 HBA doorbell registers.\n");
7013 goto out_iounmap_ctrl;
7014 }
7015 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
7016 if (error)
7017 goto out_iounmap_all;
7018 }
7019
7020 return 0;
7021
7022 out_iounmap_all:
7023 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
7024 out_iounmap_ctrl:
7025 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
7026 out_iounmap_conf:
7027 iounmap(phba->sli4_hba.conf_regs_memmap_p);
7028 out:
7029 return error;
7030 }
7031
7032 /**
7033 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
7034 * @phba: pointer to lpfc hba data structure.
7035 *
7036 * This routine is invoked to unset the PCI device memory space for device
7037 * with SLI-4 interface spec.
7038 **/
7039 static void
7040 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
7041 {
7042 struct pci_dev *pdev;
7043
7044 /* Obtain PCI device reference */
7045 if (!phba->pcidev)
7046 return;
7047 else
7048 pdev = phba->pcidev;
7049
7052 /* Unmap I/O memory space */
7053 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
7054 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
7055 iounmap(phba->sli4_hba.conf_regs_memmap_p);
7056
7057 return;
7058 }
7059
7060 /**
7061 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
7062 * @phba: pointer to lpfc hba data structure.
7063 *
7064 * This routine is invoked to enable the MSI-X interrupt vectors to device
7065 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
7066 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
7067 * invoked, enables either all or nothing, depending on the current
7068 * availability of PCI vector resources. The device driver is responsible
7069 * for calling the individual request_irq() to register each MSI-X vector
7070 * with an interrupt handler, which is done in this function. Note that
7071 * later when device is unloading, the driver should always call free_irq()
7072 * on all MSI-X vectors it has done request_irq() on before calling
7073 * pci_disable_msix(). Failure to do so results in a BUG_ON() and the
7074 * device will be left with MSI-X enabled, leaking its vectors.
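 *
 * On SLI-3 the two MSI-X vectors are split by role: vector 0 is bound
 * to the slow-path (SP) handler and vector 1 to the fast-path (FP)
 * handler, after which a CONFIG_MSI mailbox command maps attention
 * conditions onto the two messages.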
7075 * 7076 * Return codes 7077 * 0 - successful 7078 * other values - error 7079 **/ 7080 static int 7081 lpfc_sli_enable_msix(struct lpfc_hba *phba) 7082 { 7083 int rc, i; 7084 LPFC_MBOXQ_t *pmb; 7085 7086 /* Set up MSI-X multi-message vectors */ 7087 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 7088 phba->msix_entries[i].entry = i; 7089 7090 /* Configure MSI-X capability structure */ 7091 rc = pci_enable_msix(phba->pcidev, phba->msix_entries, 7092 ARRAY_SIZE(phba->msix_entries)); 7093 if (rc) { 7094 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7095 "0420 PCI enable MSI-X failed (%d)\n", rc); 7096 goto msi_fail_out; 7097 } 7098 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 7099 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7100 "0477 MSI-X entry[%d]: vector=x%x " 7101 "message=%d\n", i, 7102 phba->msix_entries[i].vector, 7103 phba->msix_entries[i].entry); 7104 /* 7105 * Assign MSI-X vectors to interrupt handlers 7106 */ 7107 7108 /* vector-0 is associated to slow-path handler */ 7109 rc = request_irq(phba->msix_entries[0].vector, 7110 &lpfc_sli_sp_intr_handler, IRQF_SHARED, 7111 LPFC_SP_DRIVER_HANDLER_NAME, phba); 7112 if (rc) { 7113 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7114 "0421 MSI-X slow-path request_irq failed " 7115 "(%d)\n", rc); 7116 goto msi_fail_out; 7117 } 7118 7119 /* vector-1 is associated to fast-path handler */ 7120 rc = request_irq(phba->msix_entries[1].vector, 7121 &lpfc_sli_fp_intr_handler, IRQF_SHARED, 7122 LPFC_FP_DRIVER_HANDLER_NAME, phba); 7123 7124 if (rc) { 7125 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7126 "0429 MSI-X fast-path request_irq failed " 7127 "(%d)\n", rc); 7128 goto irq_fail_out; 7129 } 7130 7131 /* 7132 * Configure HBA MSI-X attention conditions to messages 7133 */ 7134 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7135 7136 if (!pmb) { 7137 rc = -ENOMEM; 7138 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7139 "0474 Unable to allocate memory for issuing " 7140 "MBOX_CONFIG_MSI command\n"); 7141 goto mem_fail_out; 7142 } 7143 rc = lpfc_config_msi(phba, pmb); 7144 if (rc) 7145 goto mbx_fail_out; 7146 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 7147 if (rc != MBX_SUCCESS) { 7148 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 7149 "0351 Config MSI mailbox command failed, " 7150 "mbxCmd x%x, mbxStatus x%x\n", 7151 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 7152 goto mbx_fail_out; 7153 } 7154 7155 /* Free memory allocated for mailbox command */ 7156 mempool_free(pmb, phba->mbox_mem_pool); 7157 return rc; 7158 7159 mbx_fail_out: 7160 /* Free memory allocated for mailbox command */ 7161 mempool_free(pmb, phba->mbox_mem_pool); 7162 7163 mem_fail_out: 7164 /* free the irq already requested */ 7165 free_irq(phba->msix_entries[1].vector, phba); 7166 7167 irq_fail_out: 7168 /* free the irq already requested */ 7169 free_irq(phba->msix_entries[0].vector, phba); 7170 7171 msi_fail_out: 7172 /* Unconfigure MSI-X capability structure */ 7173 pci_disable_msix(phba->pcidev); 7174 return rc; 7175 } 7176 7177 /** 7178 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device. 7179 * @phba: pointer to lpfc hba data structure. 7180 * 7181 * This routine is invoked to release the MSI-X vectors and then disable the 7182 * MSI-X interrupt mode to device with SLI-3 interface spec. 
 **/
static void
lpfc_sli_disable_msix(struct lpfc_hba *phba)
{
	int i;

	/* Free up MSI-X multi-message vectors */
	for (i = 0; i < LPFC_MSIX_VECTORS; i++)
		free_irq(phba->msix_entries[i].vector, phba);
	/* Disable MSI-X */
	pci_disable_msix(phba->pcidev);

	return;
}

/**
 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode on a device with
 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
 * enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler,
 * which is done in this function.
 *
 * Return codes
 * 0 - successful
 * other values - error
 **/
static int
lpfc_sli_enable_msi(struct lpfc_hba *phba)
{
	int rc;

	rc = pci_enable_msi(phba->pcidev);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0462 PCI enable MSI mode success.\n");
	else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0471 PCI enable MSI mode failed (%d)\n", rc);
		return rc;
	}

	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
	if (rc) {
		pci_disable_msi(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0478 MSI request_irq failed (%d)\n", rc);
	}
	return rc;
}

/**
 * lpfc_sli_disable_msi - Disable MSI interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the MSI interrupt mode on a device with
 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it has
 * done request_irq() on before calling pci_disable_msi(). Failure to do so
 * results in a BUG_ON() and the device will be left with MSI enabled,
 * leaking its vector.
 **/
static void
lpfc_sli_disable_msi(struct lpfc_hba *phba)
{
	free_irq(phba->pcidev->irq, phba);
	pci_disable_msi(phba->pcidev);
	return;
}

/**
 * lpfc_sli_enable_intr - Enable device interrupt on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: the configured interrupt mode (2 = MSI-X, 1 = MSI, 0 = INTx).
 *
 * This routine is invoked to enable device interrupts and associate the
 * driver's interrupt handler(s) with interrupt vector(s) on a device with
 * SLI-3 interface spec. Depending on the interrupt mode configured for the
 * driver, it will try to fall back from the configured interrupt mode to an
 * interrupt mode which is supported by the platform, kernel, and device, in
 * the order of:
 * MSI-X -> MSI -> IRQ.
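 *
 * A minimal usage sketch (this mirrors the enable/fallback retry loop used
 * by the probe path later in this file; error handling elided):
 *
 *	intr_mode = lpfc_sli_enable_intr(phba, phba->cfg_use_msi);
 *	if (intr_mode == LPFC_INTR_ERROR)
 *		error = -ENODEV;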
 *
 * Return codes
 * 0 - successful
 * other values - error
 **/
static uint32_t
lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval;

	if (cfg_mode == 2) {
		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
		if (!retval) {
			/* Now, try to enable MSI-X interrupt mode */
			retval = lpfc_sli_enable_msix(phba);
			if (!retval) {
				/* Indicate initialization to MSI-X mode */
				phba->intr_type = MSIX;
				intr_mode = 2;
			}
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;
		}
	}
	return intr_mode;
}

/**
 * lpfc_sli_disable_intr - Disable device interrupt on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupts and disassociate the
 * driver's interrupt handler(s) from interrupt vector(s) on a device with
 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
 * release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli_disable_intr(struct lpfc_hba *phba)
{
	/* Disable the currently initialized interrupt mode */
	if (phba->intr_type == MSIX)
		lpfc_sli_disable_msix(phba);
	else if (phba->intr_type == MSI)
		lpfc_sli_disable_msi(phba);
	else if (phba->intr_type == INTx)
		free_irq(phba->pcidev->irq, phba);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;

	return;
}

/**
 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors on a device
 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
 * enables either all or nothing, depending on the current availability of
 * PCI vector resources. The device driver is responsible for calling the
 * individual request_irq() to register each MSI-X vector with an interrupt
 * handler, which is done in this function. Note that later, when the device
 * is unloading, the driver should always call free_irq() on all MSI-X
 * vectors it has done request_irq() on before calling pci_disable_msix().
 * Failure to do so results in a BUG_ON() and the device will be left with
 * MSI-X enabled, leaking its vectors.
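 *
 * The pci_enable_msix() contract relied on below (old kernel API): a
 * positive return value reports how many vectors could actually be
 * allocated, so the call is simply retried with that count (sketch):
 *
 *	rc = pci_enable_msix(pdev, entries, nvec);
 *	if (rc > 0)
 *		rc = pci_enable_msix(pdev, entries, rc);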
 *
 * Return codes
 * 0 - successful
 * other values - error
 **/
static int
lpfc_sli4_enable_msix(struct lpfc_hba *phba)
{
	int vectors, rc, index;

	/* Set up MSI-X multi-message vectors */
	for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
		phba->sli4_hba.msix_entries[index].entry = index;

	/* Configure MSI-X capability structure */
	vectors = phba->sli4_hba.cfg_eqn;
enable_msix_vectors:
	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
			     vectors);
	if (rc > 1) {
		/* Positive return reports the vectors available; retry */
		vectors = rc;
		goto enable_msix_vectors;
	} else if (rc) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0484 PCI enable MSI-X failed (%d)\n", rc);
		goto msi_fail_out;
	}

	/* Log MSI-X vector assignment */
	for (index = 0; index < vectors; index++)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0489 MSI-X entry[%d]: vector=x%x "
				"message=%d\n", index,
				phba->sli4_hba.msix_entries[index].vector,
				phba->sli4_hba.msix_entries[index].entry);
	/*
	 * Assign MSI-X vectors to interrupt handlers
	 */

	/* The first vector must be associated with the slow-path handler
	 * for the MQ
	 */
	rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
			 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0485 MSI-X slow-path request_irq failed "
				"(%d)\n", rc);
		goto msi_fail_out;
	}

	/* The rest of the vector(s) are associated with fast-path handler(s) */
	for (index = 1; index < vectors; index++) {
		phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
		phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
				 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
				 LPFC_FP_DRIVER_HANDLER_NAME,
				 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0486 MSI-X fast-path (%d) "
					"request_irq failed (%d)\n", index, rc);
			goto cfg_fail_out;
		}
	}
	phba->sli4_hba.msix_vec_nr = vectors;

	return rc;

cfg_fail_out:
	/* free the irqs already requested; the vector and dev_id must match
	 * the request_irq() pairing above (vector [index] was registered
	 * with fcp_eq_hdl[index - 1])
	 */
	for (--index; index >= 1; index--)
		free_irq(phba->sli4_hba.msix_entries[index].vector,
			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);

	/* free the irq already requested */
	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);

msi_fail_out:
	/* Unconfigure MSI-X capability structure */
	pci_disable_msix(phba->pcidev);
	return rc;
}

/**
 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release the MSI-X vectors and then disable the
 * MSI-X interrupt mode on a device with SLI-4 interface spec.
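 *
 * Note that each free_irq() must pass the same dev_id cookie that the
 * matching request_irq() used in lpfc_sli4_enable_msix(): @phba for the
 * slow-path vector 0, and the per-EQ handle for each fast-path vector,
 * e.g. for vector 1 (sketch):
 *
 *	free_irq(phba->sli4_hba.msix_entries[1].vector,
 *		 &phba->sli4_hba.fcp_eq_hdl[0]);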
 **/
static void
lpfc_sli4_disable_msix(struct lpfc_hba *phba)
{
	int index;

	/* Free up MSI-X multi-message vectors */
	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);

	for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
		free_irq(phba->sli4_hba.msix_entries[index].vector,
			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);

	/* Disable MSI-X */
	pci_disable_msix(phba->pcidev);

	return;
}

/**
 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode on a device with
 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
 * to enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler,
 * which is done in this function.
 *
 * Return codes
 * 0 - successful
 * other values - error
 **/
static int
lpfc_sli4_enable_msi(struct lpfc_hba *phba)
{
	int rc, index;

	rc = pci_enable_msi(phba->pcidev);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0487 PCI enable MSI mode success.\n");
	else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0488 PCI enable MSI mode failed (%d)\n", rc);
		return rc;
	}

	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
	if (rc) {
		pci_disable_msi(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0490 MSI request_irq failed (%d)\n", rc);
		return rc;
	}

	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
	}

	return 0;
}

/**
 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the MSI interrupt mode on a device with
 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it has
 * done request_irq() on before calling pci_disable_msi(). Failure to do so
 * results in a BUG_ON() and the device will be left with MSI enabled,
 * leaking its vector.
 **/
static void
lpfc_sli4_disable_msi(struct lpfc_hba *phba)
{
	free_irq(phba->pcidev->irq, phba);
	pci_disable_msi(phba->pcidev);
	return;
}

/**
 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: the configured interrupt mode (2 = MSI-X, 1 = MSI, 0 = INTx).
 *
 * This routine is invoked to enable device interrupts and associate the
 * driver's interrupt handler(s) with interrupt vector(s) on a device with
 * SLI-4 interface spec. Depending on the interrupt mode configured for the
 * driver, it will try to fall back from the configured interrupt mode to an
 * interrupt mode which is supported by the platform, kernel, and device, in
 * the order of:
 * MSI-X -> MSI -> IRQ.
 *
 * Return codes
 * 0 - successful
 * other values - error
 **/
static uint32_t
lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval, index;

	if (cfg_mode == 2) {
		/* No conf_port mbox cmd is needed before conf_msi here;
		 * try to enable MSI-X interrupt mode directly.
		 */
		retval = lpfc_sli4_enable_msix(phba);
		if (!retval) {
			/* Indicate initialization to MSI-X mode */
			phba->intr_type = MSIX;
			intr_mode = 2;
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli4_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;
			for (index = 0; index < phba->cfg_fcp_eq_count;
			     index++) {
				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
			}
		}
	}
	return intr_mode;
}

/**
 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupts and disassociate
 * the driver's interrupt handler(s) from interrupt vector(s) on a device
 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
 * will release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli4_disable_intr(struct lpfc_hba *phba)
{
	/* Disable the currently initialized interrupt mode */
	if (phba->intr_type == MSIX)
		lpfc_sli4_disable_msix(phba);
	else if (phba->intr_type == MSI)
		lpfc_sli4_disable_msi(phba);
	else if (phba->intr_type == INTx)
		free_irq(phba->pcidev->irq, phba);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;

	return;
}

/**
 * lpfc_unset_hba - Unset SLI3 hba device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the HBA device initialization steps for
 * a device with SLI-3 interface spec.
 **/
static void
lpfc_unset_hba(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(shost->host_lock);

	lpfc_stop_hba_timers(phba);

	phba->pport->work_port_events = 0;

	lpfc_sli_hba_down(phba);

	lpfc_sli_brdrestart(phba);

	lpfc_sli_disable_intr(phba);

	return;
}

/**
 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the HBA device initialization steps for
 * a device with SLI-4 interface spec.
 **/
static void
lpfc_sli4_unset_hba(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(shost->host_lock);

	phba->pport->work_port_events = 0;

	/* Stop the SLI4 device port */
	lpfc_stop_port(phba);

	lpfc_sli4_disable_intr(phba);

	/* Reset SLI4 HBA FCoE function */
	lpfc_pci_function_reset(phba);

	return;
}

/**
 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to wait for completion
 * of the device's busy XRI exchanges. It checks for busy XRI exchanges on
 * outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
 * that, it checks every 30 seconds, logs an error message, and waits
 * forever. Only when all busy XRI exchanges have completed does the driver
 * unload proceed, invoking the function reset ioctl mailbox command to the
 * CNA and releasing the rest of the driver unload resources.
 **/
static void
lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
{
	int wait_time = 0;
	int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);

	while (!fcp_xri_cmpl || !els_xri_cmpl) {
		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
			if (!fcp_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"2877 FCP XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			if (!els_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"2878 ELS XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
		} else {
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
		}
		fcp_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
		els_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
	}
}

/**
 * lpfc_sli4_hba_unset - Unset the fcoe hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to reset the HBA's FCoE
 * function. The caller is not required to hold any lock. This routine
 * issues the PCI function reset mailbox command to reset the FCoE function.
 * At the end of the function, it calls the lpfc_hba_down_post function to
 * free any pending commands.
 **/
static void
lpfc_sli4_hba_unset(struct lpfc_hba *phba)
{
	int wait_cnt = 0;
	LPFC_MBOXQ_t *mboxq;

	lpfc_stop_hba_timers(phba);
	phba->sli4_hba.intr_enable = 0;

	/*
	 * Gracefully wait out any potentially outstanding asynchronous
	 * mailbox command.
	 */

	/* First, block any pending async mailbox commands from being posted */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);
	/* Now, try to wait it out if we can */
	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		msleep(10);
		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
			break;
	}
	/* Forcefully release the outstanding mailbox command if timed out */
	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_lock_irq(&phba->hbalock);
		mboxq = phba->sli.mbox_active;
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
		spin_unlock_irq(&phba->hbalock);
	}

	/* Abort all iocbs associated with the hba */
	lpfc_sli_hba_iocb_abort(phba);

	/* Wait for completion of device XRI exchange busy */
	lpfc_sli4_xri_exchange_busy_wait(phba);

	/* Disable PCI subsystem interrupt */
	lpfc_sli4_disable_intr(phba);

	/* Stopping the kthread will trigger work_done one more time */
	kthread_stop(phba->worker_thread);

	/* Reset SLI4 HBA FCoE function */
	lpfc_pci_function_reset(phba);

	/* Stop the SLI4 device port */
	phba->pport->work_port_events = 0;
}

/**
 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's
 * sli4 capabilities.
 *
 * This function may be called from any context that can block-wait
 * for the completion. The expectation is that this routine is called
 * typically from probe_one or from the online routine.
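 *
 * A minimal calling sketch (NULL/error handling elided; the mailbox is
 * taken from the driver's mailbox mempool as elsewhere in this file):
 *
 *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	rc = lpfc_pc_sli4_params_get(phba, mboxq);
 *	mempool_free(mboxq, phba->mbox_mem_pool);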
 **/
int
lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	uint32_t mbox_tmo;

	rc = 0;
	mqe = &mboxq->u.mqe;

	/* Read the port's SLI4 Parameters port capabilities */
	lpfc_sli4_params(mboxq);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}

	if (unlikely(rc))
		return 1;

	sli4_params = &phba->sli4_hba.pc_sli4_params;
	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
					     &mqe->un.sli4_params);
	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
					     &mqe->un.sli4_params);
	sli4_params->proto_types = mqe->un.sli4_params.word3;
	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
	return rc;
}

/**
 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be called to attach a device with SLI-3 interface spec
 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * presented on the PCI bus, the kernel PCI subsystem looks at PCI
 * device-specific information of the device and driver to see if the driver
 * states that it can support this kind of device. If the match is
 * successful, the driver core invokes this routine. If this routine
 * determines it can claim the HBA, it does all the initialization that it
 * needs to do to handle the HBA properly.
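 *
 * This probe is dispatched through the driver's struct pci_driver table
 * (abridged sketch; the actual table, with its id_table, PM, and error
 * handler hooks, is defined elsewhere in this file):
 *
 *	static struct pci_driver lpfc_driver = {
 *		.name	= LPFC_DRIVER_NAME,
 *		.probe	= lpfc_pci_probe_one,
 *		.remove	= __devexit_p(lpfc_pci_remove_one),
 *	};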
7868 * 7869 * Return code 7870 * 0 - driver can claim the device 7871 * negative value - driver can not claim the device 7872 **/ 7873 static int __devinit 7874 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 7875 { 7876 struct lpfc_hba *phba; 7877 struct lpfc_vport *vport = NULL; 7878 struct Scsi_Host *shost = NULL; 7879 int error; 7880 uint32_t cfg_mode, intr_mode; 7881 7882 /* Allocate memory for HBA structure */ 7883 phba = lpfc_hba_alloc(pdev); 7884 if (!phba) 7885 return -ENOMEM; 7886 7887 /* Perform generic PCI device enabling operation */ 7888 error = lpfc_enable_pci_dev(phba); 7889 if (error) { 7890 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7891 "1401 Failed to enable pci device.\n"); 7892 goto out_free_phba; 7893 } 7894 7895 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 7896 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 7897 if (error) 7898 goto out_disable_pci_dev; 7899 7900 /* Set up SLI-3 specific device PCI memory space */ 7901 error = lpfc_sli_pci_mem_setup(phba); 7902 if (error) { 7903 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7904 "1402 Failed to set up pci memory space.\n"); 7905 goto out_disable_pci_dev; 7906 } 7907 7908 /* Set up phase-1 common device driver resources */ 7909 error = lpfc_setup_driver_resource_phase1(phba); 7910 if (error) { 7911 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7912 "1403 Failed to set up driver resource.\n"); 7913 goto out_unset_pci_mem_s3; 7914 } 7915 7916 /* Set up SLI-3 specific device driver resources */ 7917 error = lpfc_sli_driver_resource_setup(phba); 7918 if (error) { 7919 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7920 "1404 Failed to set up driver resource.\n"); 7921 goto out_unset_pci_mem_s3; 7922 } 7923 7924 /* Initialize and populate the iocb list per host */ 7925 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 7926 if (error) { 7927 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7928 "1405 Failed to initialize iocb list.\n"); 7929 goto out_unset_driver_resource_s3; 7930 } 7931 7932 /* Set up common device driver resources */ 7933 error = lpfc_setup_driver_resource_phase2(phba); 7934 if (error) { 7935 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7936 "1406 Failed to set up driver resource.\n"); 7937 goto out_free_iocb_list; 7938 } 7939 7940 /* Create SCSI host to the physical port */ 7941 error = lpfc_create_shost(phba); 7942 if (error) { 7943 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7944 "1407 Failed to create scsi host.\n"); 7945 goto out_unset_driver_resource; 7946 } 7947 7948 /* Configure sysfs attributes */ 7949 vport = phba->pport; 7950 error = lpfc_alloc_sysfs_attr(vport); 7951 if (error) { 7952 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7953 "1476 Failed to allocate sysfs attr\n"); 7954 goto out_destroy_shost; 7955 } 7956 7957 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 7958 /* Now, trying to enable interrupt and bring up the device */ 7959 cfg_mode = phba->cfg_use_msi; 7960 while (true) { 7961 /* Put device to a known state before enabling interrupt */ 7962 lpfc_stop_port(phba); 7963 /* Configure and enable interrupt */ 7964 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 7965 if (intr_mode == LPFC_INTR_ERROR) { 7966 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7967 "0431 Failed to enable interrupt.\n"); 7968 error = -ENODEV; 7969 goto out_free_sysfs_attr; 7970 } 7971 /* SLI-3 HBA setup */ 7972 if (lpfc_sli_hba_setup(phba)) { 7973 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7974 "1477 Failed to set up hba\n"); 7975 error = -ENODEV; 7976 
goto out_remove_device; 7977 } 7978 7979 /* Wait 50ms for the interrupts of previous mailbox commands */ 7980 msleep(50); 7981 /* Check active interrupts on message signaled interrupts */ 7982 if (intr_mode == 0 || 7983 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 7984 /* Log the current active interrupt mode */ 7985 phba->intr_mode = intr_mode; 7986 lpfc_log_intr_mode(phba, intr_mode); 7987 break; 7988 } else { 7989 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7990 "0447 Configure interrupt mode (%d) " 7991 "failed active interrupt test.\n", 7992 intr_mode); 7993 /* Disable the current interrupt mode */ 7994 lpfc_sli_disable_intr(phba); 7995 /* Try next level of interrupt mode */ 7996 cfg_mode = --intr_mode; 7997 } 7998 } 7999 8000 /* Perform post initialization setup */ 8001 lpfc_post_init_setup(phba); 8002 8003 /* Check if there are static vports to be created. */ 8004 lpfc_create_static_vport(phba); 8005 8006 return 0; 8007 8008 out_remove_device: 8009 lpfc_unset_hba(phba); 8010 out_free_sysfs_attr: 8011 lpfc_free_sysfs_attr(vport); 8012 out_destroy_shost: 8013 lpfc_destroy_shost(phba); 8014 out_unset_driver_resource: 8015 lpfc_unset_driver_resource_phase2(phba); 8016 out_free_iocb_list: 8017 lpfc_free_iocb_list(phba); 8018 out_unset_driver_resource_s3: 8019 lpfc_sli_driver_resource_unset(phba); 8020 out_unset_pci_mem_s3: 8021 lpfc_sli_pci_mem_unset(phba); 8022 out_disable_pci_dev: 8023 lpfc_disable_pci_dev(phba); 8024 if (shost) 8025 scsi_host_put(shost); 8026 out_free_phba: 8027 lpfc_hba_free(phba); 8028 return error; 8029 } 8030 8031 /** 8032 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. 8033 * @pdev: pointer to PCI device 8034 * 8035 * This routine is to be called to disattach a device with SLI-3 interface 8036 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 8037 * removed from PCI bus, it performs all the necessary cleanup for the HBA 8038 * device to be removed from the PCI subsystem properly. 8039 **/ 8040 static void __devexit 8041 lpfc_pci_remove_one_s3(struct pci_dev *pdev) 8042 { 8043 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8044 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 8045 struct lpfc_vport **vports; 8046 struct lpfc_hba *phba = vport->phba; 8047 int i; 8048 int bars = pci_select_bars(pdev, IORESOURCE_MEM); 8049 8050 spin_lock_irq(&phba->hbalock); 8051 vport->load_flag |= FC_UNLOADING; 8052 spin_unlock_irq(&phba->hbalock); 8053 8054 lpfc_free_sysfs_attr(vport); 8055 8056 /* Release all the vports against this physical port */ 8057 vports = lpfc_create_vport_work_array(phba); 8058 if (vports != NULL) 8059 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) 8060 fc_vport_terminate(vports[i]->fc_vport); 8061 lpfc_destroy_vport_work_array(phba, vports); 8062 8063 /* Remove FC host and then SCSI host with the physical port */ 8064 fc_remove_host(shost); 8065 scsi_remove_host(shost); 8066 lpfc_cleanup(vport); 8067 8068 /* 8069 * Bring down the SLI Layer. This step disable all interrupts, 8070 * clears the rings, discards all mailbox commands, and resets 8071 * the HBA. 
8072 */ 8073 8074 /* HBA interrupt will be disabled after this call */ 8075 lpfc_sli_hba_down(phba); 8076 /* Stop kthread signal shall trigger work_done one more time */ 8077 kthread_stop(phba->worker_thread); 8078 /* Final cleanup of txcmplq and reset the HBA */ 8079 lpfc_sli_brdrestart(phba); 8080 8081 lpfc_stop_hba_timers(phba); 8082 spin_lock_irq(&phba->hbalock); 8083 list_del_init(&vport->listentry); 8084 spin_unlock_irq(&phba->hbalock); 8085 8086 lpfc_debugfs_terminate(vport); 8087 8088 /* Disable interrupt */ 8089 lpfc_sli_disable_intr(phba); 8090 8091 pci_set_drvdata(pdev, NULL); 8092 scsi_host_put(shost); 8093 8094 /* 8095 * Call scsi_free before mem_free since scsi bufs are released to their 8096 * corresponding pools here. 8097 */ 8098 lpfc_scsi_free(phba); 8099 lpfc_mem_free_all(phba); 8100 8101 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 8102 phba->hbqslimp.virt, phba->hbqslimp.phys); 8103 8104 /* Free resources associated with SLI2 interface */ 8105 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 8106 phba->slim2p.virt, phba->slim2p.phys); 8107 8108 /* unmap adapter SLIM and Control Registers */ 8109 iounmap(phba->ctrl_regs_memmap_p); 8110 iounmap(phba->slim_memmap_p); 8111 8112 lpfc_hba_free(phba); 8113 8114 pci_release_selected_regions(pdev, bars); 8115 pci_disable_device(pdev); 8116 } 8117 8118 /** 8119 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 8120 * @pdev: pointer to PCI device 8121 * @msg: power management message 8122 * 8123 * This routine is to be called from the kernel's PCI subsystem to support 8124 * system Power Management (PM) to device with SLI-3 interface spec. When 8125 * PM invokes this method, it quiesces the device by stopping the driver's 8126 * worker thread for the device, turning off device's interrupt and DMA, 8127 * and bring the device offline. Note that as the driver implements the 8128 * minimum PM requirements to a power-aware driver's PM support for the 8129 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 8130 * to the suspend() method call will be treated as SUSPEND and the driver will 8131 * fully reinitialize its device during resume() method call, the driver will 8132 * set device to PCI_D3hot state in PCI config space instead of setting it 8133 * according to the @msg provided by the PM. 8134 * 8135 * Return code 8136 * 0 - driver suspended the device 8137 * Error otherwise 8138 **/ 8139 static int 8140 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg) 8141 { 8142 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8143 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8144 8145 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8146 "0473 PCI device Power Management suspend.\n"); 8147 8148 /* Bring down the device */ 8149 lpfc_offline_prep(phba); 8150 lpfc_offline(phba); 8151 kthread_stop(phba->worker_thread); 8152 8153 /* Disable interrupt from device */ 8154 lpfc_sli_disable_intr(phba); 8155 8156 /* Save device state to PCI config space */ 8157 pci_save_state(pdev); 8158 pci_set_power_state(pdev, PCI_D3hot); 8159 8160 return 0; 8161 } 8162 8163 /** 8164 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt 8165 * @pdev: pointer to PCI device 8166 * 8167 * This routine is to be called from the kernel's PCI subsystem to support 8168 * system Power Management (PM) to device with SLI-3 interface spec. 
When PM
 * invokes this method, it restores the device's PCI config space state and
 * fully reinitializes the device and brings it online. Note that as the
 * driver implements the minimum PM requirements to a power-aware driver's
 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
 * driver will fully reinitialize its device during resume() method call,
 * the device will be set to PCI_D0 directly in PCI config space before
 * restoring the state.
 *
 * Return code
 * 0 - driver resumed the device
 * Error otherwise
 **/
static int
lpfc_pci_resume_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0452 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0434 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0430 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot recovery.
 * It aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2723 PCI channel I/O abort preparing for recovery\n");

	/*
	 * There may be errored I/Os through the HBA; abort all I/Os on the
	 * txcmplq and let the SCSI mid-layer retry them to recover.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);
}

/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
8267 **/ 8268 static void 8269 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 8270 { 8271 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8272 "2710 PCI channel disable preparing for reset\n"); 8273 8274 /* Block any management I/Os to the device */ 8275 lpfc_block_mgmt_io(phba); 8276 8277 /* Block all SCSI devices' I/Os on the host */ 8278 lpfc_scsi_dev_block(phba); 8279 8280 /* stop all timers */ 8281 lpfc_stop_hba_timers(phba); 8282 8283 /* Disable interrupt and pci device */ 8284 lpfc_sli_disable_intr(phba); 8285 pci_disable_device(phba->pcidev); 8286 8287 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 8288 lpfc_sli_flush_fcp_rings(phba); 8289 } 8290 8291 /** 8292 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 8293 * @phba: pointer to lpfc hba data structure. 8294 * 8295 * This routine is called to prepare the SLI3 device for PCI slot permanently 8296 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 8297 * pending I/Os. 8298 **/ 8299 static void 8300 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 8301 { 8302 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8303 "2711 PCI channel permanent disable for failure\n"); 8304 /* Block all SCSI devices' I/Os on the host */ 8305 lpfc_scsi_dev_block(phba); 8306 8307 /* stop all timers */ 8308 lpfc_stop_hba_timers(phba); 8309 8310 /* Clean up all driver's outstanding SCSI I/Os */ 8311 lpfc_sli_flush_fcp_rings(phba); 8312 } 8313 8314 /** 8315 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 8316 * @pdev: pointer to PCI device. 8317 * @state: the current PCI connection state. 8318 * 8319 * This routine is called from the PCI subsystem for I/O error handling to 8320 * device with SLI-3 interface spec. This function is called by the PCI 8321 * subsystem after a PCI bus error affecting this device has been detected. 8322 * When this function is invoked, it will need to stop all the I/Os and 8323 * interrupt(s) to the device. Once that is done, it will return 8324 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 8325 * as desired. 8326 * 8327 * Return codes 8328 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 8329 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 8330 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 8331 **/ 8332 static pci_ers_result_t 8333 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 8334 { 8335 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8336 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8337 8338 switch (state) { 8339 case pci_channel_io_normal: 8340 /* Non-fatal error, prepare for recovery */ 8341 lpfc_sli_prep_dev_for_recover(phba); 8342 return PCI_ERS_RESULT_CAN_RECOVER; 8343 case pci_channel_io_frozen: 8344 /* Fatal error, prepare for slot reset */ 8345 lpfc_sli_prep_dev_for_reset(phba); 8346 return PCI_ERS_RESULT_NEED_RESET; 8347 case pci_channel_io_perm_failure: 8348 /* Permanent failure, prepare for device down */ 8349 lpfc_sli_prep_dev_for_perm_failure(phba); 8350 return PCI_ERS_RESULT_DISCONNECT; 8351 default: 8352 /* Unknown state, prepare and request slot reset */ 8353 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8354 "0472 Unknown PCI error state: x%x\n", state); 8355 lpfc_sli_prep_dev_for_reset(phba); 8356 return PCI_ERS_RESULT_NEED_RESET; 8357 } 8358 } 8359 8360 /** 8361 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 8362 * @pdev: pointer to PCI device. 
8363 * 8364 * This routine is called from the PCI subsystem for error handling to 8365 * device with SLI-3 interface spec. This is called after PCI bus has been 8366 * reset to restart the PCI card from scratch, as if from a cold-boot. 8367 * During the PCI subsystem error recovery, after driver returns 8368 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 8369 * recovery and then call this routine before calling the .resume method 8370 * to recover the device. This function will initialize the HBA device, 8371 * enable the interrupt, but it will just put the HBA to offline state 8372 * without passing any I/O traffic. 8373 * 8374 * Return codes 8375 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 8376 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 8377 */ 8378 static pci_ers_result_t 8379 lpfc_io_slot_reset_s3(struct pci_dev *pdev) 8380 { 8381 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8382 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8383 struct lpfc_sli *psli = &phba->sli; 8384 uint32_t intr_mode; 8385 8386 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 8387 if (pci_enable_device_mem(pdev)) { 8388 printk(KERN_ERR "lpfc: Cannot re-enable " 8389 "PCI device after reset.\n"); 8390 return PCI_ERS_RESULT_DISCONNECT; 8391 } 8392 8393 pci_restore_state(pdev); 8394 8395 /* 8396 * As the new kernel behavior of pci_restore_state() API call clears 8397 * device saved_state flag, need to save the restored state again. 8398 */ 8399 pci_save_state(pdev); 8400 8401 if (pdev->is_busmaster) 8402 pci_set_master(pdev); 8403 8404 spin_lock_irq(&phba->hbalock); 8405 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 8406 spin_unlock_irq(&phba->hbalock); 8407 8408 /* Configure and enable interrupt */ 8409 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 8410 if (intr_mode == LPFC_INTR_ERROR) { 8411 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8412 "0427 Cannot re-enable interrupt after " 8413 "slot reset.\n"); 8414 return PCI_ERS_RESULT_DISCONNECT; 8415 } else 8416 phba->intr_mode = intr_mode; 8417 8418 /* Take device offline, it will perform cleanup */ 8419 lpfc_offline_prep(phba); 8420 lpfc_offline(phba); 8421 lpfc_sli_brdrestart(phba); 8422 8423 /* Log the current active interrupt mode */ 8424 lpfc_log_intr_mode(phba, phba->intr_mode); 8425 8426 return PCI_ERS_RESULT_RECOVERED; 8427 } 8428 8429 /** 8430 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 8431 * @pdev: pointer to PCI device 8432 * 8433 * This routine is called from the PCI subsystem for error handling to device 8434 * with SLI-3 interface spec. It is called when kernel error recovery tells 8435 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 8436 * error recovery. After this call, traffic can start to flow from this device 8437 * again. 8438 */ 8439 static void 8440 lpfc_io_resume_s3(struct pci_dev *pdev) 8441 { 8442 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8443 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 8444 8445 /* Bring device online, it will be no-op for non-fatal error resume */ 8446 lpfc_online(phba); 8447 8448 /* Clean up Advanced Error Reporting (AER) if needed */ 8449 if (phba->hba_flag & HBA_AER_ENABLED) 8450 pci_cleanup_aer_uncorrect_error_status(pdev); 8451 } 8452 8453 /** 8454 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 8455 * @phba: pointer to lpfc hba data structure. 
8456 * 8457 * returns the number of ELS/CT IOCBs to reserve 8458 **/ 8459 int 8460 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 8461 { 8462 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 8463 8464 if (phba->sli_rev == LPFC_SLI_REV4) { 8465 if (max_xri <= 100) 8466 return 10; 8467 else if (max_xri <= 256) 8468 return 25; 8469 else if (max_xri <= 512) 8470 return 50; 8471 else if (max_xri <= 1024) 8472 return 100; 8473 else 8474 return 150; 8475 } else 8476 return 0; 8477 } 8478 8479 /** 8480 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 8481 * @pdev: pointer to PCI device 8482 * @pid: pointer to PCI device identifier 8483 * 8484 * This routine is called from the kernel's PCI subsystem to device with 8485 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 8486 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 8487 * information of the device and driver to see if the driver state that it 8488 * can support this kind of device. If the match is successful, the driver 8489 * core invokes this routine. If this routine determines it can claim the HBA, 8490 * it does all the initialization that it needs to do to handle the HBA 8491 * properly. 8492 * 8493 * Return code 8494 * 0 - driver can claim the device 8495 * negative value - driver can not claim the device 8496 **/ 8497 static int __devinit 8498 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 8499 { 8500 struct lpfc_hba *phba; 8501 struct lpfc_vport *vport = NULL; 8502 struct Scsi_Host *shost = NULL; 8503 int error; 8504 uint32_t cfg_mode, intr_mode; 8505 int mcnt; 8506 8507 /* Allocate memory for HBA structure */ 8508 phba = lpfc_hba_alloc(pdev); 8509 if (!phba) 8510 return -ENOMEM; 8511 8512 /* Perform generic PCI device enabling operation */ 8513 error = lpfc_enable_pci_dev(phba); 8514 if (error) { 8515 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8516 "1409 Failed to enable pci device.\n"); 8517 goto out_free_phba; 8518 } 8519 8520 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 8521 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 8522 if (error) 8523 goto out_disable_pci_dev; 8524 8525 /* Set up SLI-4 specific device PCI memory space */ 8526 error = lpfc_sli4_pci_mem_setup(phba); 8527 if (error) { 8528 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8529 "1410 Failed to set up pci memory space.\n"); 8530 goto out_disable_pci_dev; 8531 } 8532 8533 /* Set up phase-1 common device driver resources */ 8534 error = lpfc_setup_driver_resource_phase1(phba); 8535 if (error) { 8536 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8537 "1411 Failed to set up driver resource.\n"); 8538 goto out_unset_pci_mem_s4; 8539 } 8540 8541 /* Set up SLI-4 Specific device driver resources */ 8542 error = lpfc_sli4_driver_resource_setup(phba); 8543 if (error) { 8544 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8545 "1412 Failed to set up driver resource.\n"); 8546 goto out_unset_pci_mem_s4; 8547 } 8548 8549 /* Initialize and populate the iocb list per host */ 8550 8551 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8552 "2821 initialize iocb list %d.\n", 8553 phba->cfg_iocb_cnt*1024); 8554 error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024); 8555 8556 if (error) { 8557 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8558 "1413 Failed to initialize iocb list.\n"); 8559 goto out_unset_driver_resource_s4; 8560 } 8561 8562 INIT_LIST_HEAD(&phba->active_rrq_list); 8563 8564 /* Set up common device driver resources */ 8565 error = 
lpfc_setup_driver_resource_phase2(phba); 8566 if (error) { 8567 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8568 "1414 Failed to set up driver resource.\n"); 8569 goto out_free_iocb_list; 8570 } 8571 8572 /* Create SCSI host to the physical port */ 8573 error = lpfc_create_shost(phba); 8574 if (error) { 8575 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8576 "1415 Failed to create scsi host.\n"); 8577 goto out_unset_driver_resource; 8578 } 8579 8580 /* Configure sysfs attributes */ 8581 vport = phba->pport; 8582 error = lpfc_alloc_sysfs_attr(vport); 8583 if (error) { 8584 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8585 "1416 Failed to allocate sysfs attr\n"); 8586 goto out_destroy_shost; 8587 } 8588 8589 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 8590 /* Now, trying to enable interrupt and bring up the device */ 8591 cfg_mode = phba->cfg_use_msi; 8592 while (true) { 8593 /* Put device to a known state before enabling interrupt */ 8594 lpfc_stop_port(phba); 8595 /* Configure and enable interrupt */ 8596 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 8597 if (intr_mode == LPFC_INTR_ERROR) { 8598 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8599 "0426 Failed to enable interrupt.\n"); 8600 error = -ENODEV; 8601 goto out_free_sysfs_attr; 8602 } 8603 /* Default to single FCP EQ for non-MSI-X */ 8604 if (phba->intr_type != MSIX) 8605 phba->cfg_fcp_eq_count = 1; 8606 else if (phba->sli4_hba.msix_vec_nr < phba->cfg_fcp_eq_count) 8607 phba->cfg_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1; 8608 /* Set up SLI-4 HBA */ 8609 if (lpfc_sli4_hba_setup(phba)) { 8610 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8611 "1421 Failed to set up hba\n"); 8612 error = -ENODEV; 8613 goto out_disable_intr; 8614 } 8615 8616 /* Send NOP mbx cmds for non-INTx mode active interrupt test */ 8617 if (intr_mode != 0) 8618 mcnt = lpfc_sli4_send_nop_mbox_cmds(phba, 8619 LPFC_ACT_INTR_CNT); 8620 8621 /* Check active interrupts received only for MSI/MSI-X */ 8622 if (intr_mode == 0 || 8623 phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) { 8624 /* Log the current active interrupt mode */ 8625 phba->intr_mode = intr_mode; 8626 lpfc_log_intr_mode(phba, intr_mode); 8627 break; 8628 } 8629 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8630 "0451 Configure interrupt mode (%d) " 8631 "failed active interrupt test.\n", 8632 intr_mode); 8633 /* Unset the previous SLI-4 HBA setup. */ 8634 /* 8635 * TODO: Is this operation compatible with IF TYPE 2 8636 * devices? All port state is deleted and cleared. 8637 */ 8638 lpfc_sli4_unset_hba(phba); 8639 /* Try next level of interrupt mode */ 8640 cfg_mode = --intr_mode; 8641 } 8642 8643 /* Perform post initialization setup */ 8644 lpfc_post_init_setup(phba); 8645 8646 /* Check if there are static vports to be created. 
*/ 8647 lpfc_create_static_vport(phba); 8648 8649 return 0; 8650 8651 out_disable_intr: 8652 lpfc_sli4_disable_intr(phba); 8653 out_free_sysfs_attr: 8654 lpfc_free_sysfs_attr(vport); 8655 out_destroy_shost: 8656 lpfc_destroy_shost(phba); 8657 out_unset_driver_resource: 8658 lpfc_unset_driver_resource_phase2(phba); 8659 out_free_iocb_list: 8660 lpfc_free_iocb_list(phba); 8661 out_unset_driver_resource_s4: 8662 lpfc_sli4_driver_resource_unset(phba); 8663 out_unset_pci_mem_s4: 8664 lpfc_sli4_pci_mem_unset(phba); 8665 out_disable_pci_dev: 8666 lpfc_disable_pci_dev(phba); 8667 if (shost) 8668 scsi_host_put(shost); 8669 out_free_phba: 8670 lpfc_hba_free(phba); 8671 return error; 8672 } 8673 8674 /** 8675 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 8676 * @pdev: pointer to PCI device 8677 * 8678 * This routine is called from the kernel's PCI subsystem to device with 8679 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 8680 * removed from PCI bus, it performs all the necessary cleanup for the HBA 8681 * device to be removed from the PCI subsystem properly. 8682 **/ 8683 static void __devexit 8684 lpfc_pci_remove_one_s4(struct pci_dev *pdev) 8685 { 8686 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8687 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 8688 struct lpfc_vport **vports; 8689 struct lpfc_hba *phba = vport->phba; 8690 int i; 8691 8692 /* Mark the device unloading flag */ 8693 spin_lock_irq(&phba->hbalock); 8694 vport->load_flag |= FC_UNLOADING; 8695 spin_unlock_irq(&phba->hbalock); 8696 8697 /* Free the HBA sysfs attributes */ 8698 lpfc_free_sysfs_attr(vport); 8699 8700 /* Release all the vports against this physical port */ 8701 vports = lpfc_create_vport_work_array(phba); 8702 if (vports != NULL) 8703 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) 8704 fc_vport_terminate(vports[i]->fc_vport); 8705 lpfc_destroy_vport_work_array(phba, vports); 8706 8707 /* Remove FC host and then SCSI host with the physical port */ 8708 fc_remove_host(shost); 8709 scsi_remove_host(shost); 8710 8711 /* Perform cleanup on the physical port */ 8712 lpfc_cleanup(vport); 8713 8714 /* 8715 * Bring down the SLI Layer. This step disables all interrupts, 8716 * clears the rings, discards all mailbox commands, and resets 8717 * the HBA FCoE function. 8718 */ 8719 lpfc_debugfs_terminate(vport); 8720 lpfc_sli4_hba_unset(phba); 8721 8722 spin_lock_irq(&phba->hbalock); 8723 list_del_init(&vport->listentry); 8724 spin_unlock_irq(&phba->hbalock); 8725 8726 /* Perform scsi free before driver resource_unset since scsi 8727 * buffers are released to their corresponding pools here. 8728 */ 8729 lpfc_scsi_free(phba); 8730 lpfc_sli4_driver_resource_unset(phba); 8731 8732 /* Unmap adapter Control and Doorbell registers */ 8733 lpfc_sli4_pci_mem_unset(phba); 8734 8735 /* Release PCI resources and disable device's PCI function */ 8736 scsi_host_put(shost); 8737 lpfc_disable_pci_dev(phba); 8738 8739 /* Finally, free the driver's device data structure */ 8740 lpfc_hba_free(phba); 8741 8742 return; 8743 } 8744 8745 /** 8746 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt 8747 * @pdev: pointer to PCI device 8748 * @msg: power management message 8749 * 8750 * This routine is called from the kernel's PCI subsystem to support system 8751 * Power Management (PM) to device with SLI-4 interface spec. 
When PM invokes
 * this method, it quiesces the device by stopping the driver's worker
 * thread for the device, turning off the device's interrupt and DMA, and
 * bringing the device offline. Note that as the driver implements the
 * minimum PM requirements to a power-aware driver's PM support for
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
 * driver will fully reinitialize its device during resume() method call,
 * the driver will set the device to PCI_D3hot state in PCI config space
 * instead of setting it according to the @msg provided by the PM.
 *
 * Return code
 * 0 - driver suspended the device
 * Error otherwise
 **/
static int
lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
 * this method, it restores the device's PCI config space state and fully
 * reinitializes the device and brings it online. Note that as the driver
 * implements the minimum PM requirements to a power-aware driver's PM for
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 * to the suspend() method call will be treated as SUSPEND and the driver
 * will fully reinitialize its device during resume() method call, the device
 * will be set to PCI_D0 directly in PCI config space before restoring the
 * state.
 *
 * Return code
 * 0 - driver resumed the device
 * Error otherwise
 **/
static int
lpfc_pci_resume_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter.
/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) for a device with SLI-4 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state,
 * fully reinitializes the device, and brings it online. Note that because
 * the driver implements only the minimum PM requirements for a power-aware
 * driver's suspend/resume support -- all the possible PM messages (SUSPEND,
 * HIBERNATE, FREEZE) to the suspend() method call are treated as SUSPEND
 * and the driver fully reinitializes its device during the resume() method
 * call -- the device is set to PCI_D0 directly in PCI config space before
 * restoring the state.
 *
 * Return code
 * 0 - driver resumed the device
 * Error otherwise
 **/
static int
lpfc_pci_resume_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					"lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recovery.
 * It aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2828 PCI channel I/O abort preparing for recovery\n");
	/*
	 * There may be errored I/Os through the HBA; abort all I/Os on the
	 * txcmplq and let the SCSI mid-layer retry them to recover.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);
}

/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2826 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli4_disable_intr(phba);
	pci_disable_device(phba->pcidev);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_fcp_rings(phba);
}
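/*
 * Summary of the quiesce ladder used by the SLI-4 error handler: the
 * recover prep above only aborts in-flight FCP iocbs; the reset prep
 * additionally blocks management and SCSI I/O, stops the HBA timers, and
 * disables the interrupt and PCI device; the permanent-failure prep below
 * blocks SCSI I/O, stops the timers, and flushes the FCP rings for good,
 * since the device will not be coming back.
 */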
/**
 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for the PCI slot being
 * permanently disabled. It blocks the SCSI transport layer traffic and
 * flushes the FCP pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2827 PCI channel permanent disable for failure\n");

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_fcp_rings(phba);
}

/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling on a
 * device with SLI-4 interface spec. It is called by the PCI subsystem
 * after a PCI bus error affecting this device has been detected. When this
 * function is invoked, it needs to stop all the I/Os and interrupt(s)
 * to the device. Once that is done, it returns PCI_ERS_RESULT_NEED_RESET
 * for the PCI subsystem to perform proper recovery as desired.
 *
 * Return codes
 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli4_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli4_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2825 Unknown PCI error state: x%x\n", state);
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}
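/*
 * Recovery flow note: for a frozen channel, the PCI error-recovery core
 * typically walks the driver's callbacks in this order:
 *
 *	error_detected()  returns PCI_ERS_RESULT_NEED_RESET
 *	slot_reset()      returns PCI_ERS_RESULT_RECOVERED
 *	resume()
 *
 * which, for an SLI-4 device, maps to lpfc_io_error_detected_s4() above
 * and lpfc_io_slot_reset_s4()/lpfc_io_resume_s4() below.
 */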
/**
 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling on a
 * device with SLI-4 interface spec. It is called after the PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold boot.
 * During PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem performs the proper error
 * recovery and then calls this routine before calling the .resume method
 * to recover the device. This function initializes the HBA device and
 * enables its interrupt, but it just puts the HBA in the offline state
 * without passing any I/O traffic.
 *
 * Return codes
 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
		       "PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);
	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2824 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling on a
 * device with SLI-4 interface spec. It is called when kernel error recovery
 * tells the lpfc driver that it is ok to resume normal PCI operation after
 * PCI bus error recovery. After this call, traffic can start to flow from
 * this device again.
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/*
	 * In case of slot reset, as function reset is performed through
	 * a mailbox command which needs DMA to be enabled, this operation
	 * has to be moved to the io resume phase. Taking the device offline
	 * will perform the necessary cleanup.
	 */
	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
		/* Perform device reset */
		lpfc_offline_prep(phba);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		/* Bring the device back online */
		lpfc_online(phba);
	}

	/* Clean up Advanced Error Reporting (AER) if needed */
	if (phba->hba_flag & HBA_AER_ENABLED)
		pci_cleanup_aer_uncorrect_error_status(pdev);
}
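/*
 * Pattern note for the routines that follow: each PCI entry point
 * registered with the kernel is a thin dispatcher that switches on
 * phba->pci_dev_grp (LPFC_PCI_DEV_LP for SLI-3 parts, LPFC_PCI_DEV_OC
 * for SLI-4 parts) and calls the matching _s3 or _s4 implementation.
 * Only the probe routine differs: with no phba allocated yet, it reads
 * the LPFC_SLI_INTF register to pick the SLI revision.
 */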
/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem
 * looks at the PCI device-specific information of the device and the driver
 * to see if the driver states that it can support this kind of device. If
 * the match is successful, the driver core invokes this routine. This
 * routine dispatches the action to the proper SLI-3 or SLI-4 device probing
 * routine, which will do all the initialization that it needs to do to
 * handle the HBA device properly.
 *
 * Return code
 * 0 - driver can claim the device
 * negative value - driver can not claim the device
 **/
static int __devinit
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	int rc;
	struct lpfc_sli_intf intf;

	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
		return -ENODEV;

	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
		rc = lpfc_pci_probe_one_s4(pdev, pid);
	else
		rc = lpfc_pci_probe_one_s3(pdev, pid);

	return rc;
}

/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from the PCI bus, the driver core invokes this
 * routine. This routine dispatches the action to the proper SLI-3 or SLI-4
 * device remove routine, which will perform all the necessary cleanup for
 * the device to be removed from the PCI subsystem properly.
 **/
static void __devexit
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_pci_remove_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_pci_remove_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1424 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
 * suspend the device.
 *
 * Return code
 * 0 - driver suspended the device
 * Error otherwise
 **/
static int
lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_suspend_one_s3(pdev, msg);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_suspend_one_s4(pdev, msg);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1425 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 * 0 - driver resumed the device
 * Error otherwise
 **/
static int
lpfc_pci_resume_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_slot_reset - lpfc method for restarting a PCI device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after the PCI bus has been reset to restart the PCI
 * card from scratch, as if from a cold boot. When this routine is invoked,
 * it dispatches the action to the proper SLI-3 or SLI-4 device reset
 * handling routine, which will perform the proper device reset.
 *
 * Return codes
 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1428 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}
static struct pci_device_id lpfc_id_table[] = {
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, lpfc_id_table);

static struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= __devexit_p(lpfc_pci_remove_one),
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler	= &lpfc_err_handler,
};
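/*
 * Informational note: MODULE_DEVICE_TABLE() above exports lpfc_id_table
 * into the module image so that userspace (udev/modprobe) can autoload
 * the driver when a matching device appears, by comparing the device's
 * modalias against the module's alias strings, e.g. a line of the form
 * (the device ID shown is illustrative; vendor 0x10DF is Emulex):
 *
 *	alias pci:v000010DFd0000F100sv*sd*bc*sc*i* lpfc
 */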
/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as the lpfc module entry point.
 *
 * Return codes
 * 0 - successful
 * -ENOMEM - FC attach transport failed
 * all others - failed
 **/
static int __init
lpfc_init(void)
{
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	if (lpfc_enable_npiv) {
		lpfc_transport_functions.vport_create = lpfc_vport_create;
		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	}
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		return -ENOMEM;
	if (lpfc_enable_npiv) {
		lpfc_vport_transport_template =
			fc_attach_transport(&lpfc_vport_transport_functions);
		if (lpfc_vport_transport_template == NULL) {
			fc_release_transport(lpfc_transport_template);
			return -ENOMEM;
		}
	}
	error = pci_register_driver(&lpfc_driver);
	if (error) {
		fc_release_transport(lpfc_transport_template);
		if (lpfc_enable_npiv)
			fc_release_transport(lpfc_vport_transport_template);
	}

	return error;
}

/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as the lpfc module exit point.
 **/
static void __exit
lpfc_exit(void)
{
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
	if (lpfc_enable_npiv)
		fc_release_transport(lpfc_vport_transport_template);
	if (_dump_buf_data) {
		printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
		       "_dump_buf_data at 0x%p\n",
		       (1L << _dump_buf_data_order), _dump_buf_data);
		free_pages((unsigned long)_dump_buf_data,
			   _dump_buf_data_order);
	}

	if (_dump_buf_dif) {
		printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
		       "_dump_buf_dif at 0x%p\n",
		       (1L << _dump_buf_dif_order), _dump_buf_dif);
		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
	}
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
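/*
 * Usage note (illustrative): as a loadable module the driver is typically
 * exercised with
 *
 *	# modprobe lpfc
 *	# modprobe -r lpfc
 *
 * module_init()/module_exit() above bind lpfc_init()/lpfc_exit() to those
 * events; when built into the kernel, lpfc_init() instead runs during boot
 * as part of the normal initcall sequence.
 */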