/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

/* Used when mapping IRQ vectors in a driver centric manner */
uint16_t *lpfc_used_cpu;
uint32_t lpfc_present_cpu;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}


	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
						sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
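	/*
	 * DUMP_MEM returns the VPD region a chunk at a time: keep issuing
	 * the mailbox command and advancing the offset by the returned
	 * word_cnt until the adapter reports no more data or the
	 * DMP_VPD_SIZE buffer is full. A mailbox error is treated like an
	 * empty chunk and simply terminates the loop.
	 */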
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}
/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for driver's configuring asynchronous event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it will set the internal async event support flag to 1; otherwise, it will
 * set the internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command for getting
 * wake up parameters. When this command completes, the response contains the
 * Option ROM version of the HBA. This function translates the version number
 * into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *                         cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 *
 * Return codes
 *   None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
			sizeof(struct lpfc_name));

	if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
			sizeof(struct lpfc_name));
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
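	/*
	 * Each of the six IEEE bytes yields two serial-number characters,
	 * high nibble first: 0-9 map to '0'-'9' (0x30 base) and 10-15 map
	 * to 'a'-'f' (0x61 base), producing a 12-character lowercase hex
	 * rendering of the WWNN.
	 */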
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	i = (mb->un.varRdConfig.max_xri + 1);
	if (phba->cfg_hba_queue_depth > i) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth, i);
		phba->cfg_hba_queue_depth = i;
	}

	/* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
	i = (mb->un.varRdConfig.max_xri >> 3);
	if (phba->pport->cfg_lun_queue_depth > i) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3360 LUN queue depth changed from %d to %d\n",
				phba->pport->cfg_lun_queue_depth, i);
		phba->pport->cfg_lun_queue_depth = i;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}
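	/*
	 * Build the Host Control register mask under hbalock: mailbox,
	 * error and link attention interrupts are always enabled, plus one
	 * ring interrupt bit per configured SLI ring. When the FCP ring is
	 * serviced by polling, its ring-0 interrupt is masked back off
	 * below.
	 */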
	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
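	/*
	 * Decide how to treat the link at init time: a LINK_DISABLED flag
	 * issues DOWN_LINK, cfg_suppress_link_up == LPFC_INITIALIZE_LINK
	 * brings the link up immediately, and any other setting defers
	 * link bring-up to a later explicit request (the delayed link-up
	 * mechanism described at lpfc_hba_init_link).
	 */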
	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2599 Adapter failed to issue DOWN_LINK"
			" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;
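	/*
	 * Validate the user-requested link speed against the link media
	 * type (LMT) bits reported by READ_CONFIG; any speed the board
	 * cannot support falls back to auto-negotiation instead of
	 * failing INIT_LINK.
	 */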
	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
	     !(phba->lmt & LMT_32Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
			"1302 Invalid speed for this board:%d "
			"Reset link speed to auto.\n",
			phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0498 Adapter failed to init, mbxCmd x%x "
			"INIT_LINK, mbxStatus x%x\n",
			mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"2522 Adapter failed to issue DOWN_LINK"
		" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 * rspiocb which got deferred
 *
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup completed slow path events after HBA is reset
 * when bringing down the SLI Layer.
 *
 *
 * Return codes
 *   void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);
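	/*
	 * Drain the slow-path queue event list one entry at a time under
	 * hbalock: completion WCQEs release their iocbq back to the pool,
	 * while received frames free the associated hbq buffer.
	 */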
	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						 cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		}
	}
}

/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}

/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;
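	/*
	 * SLI4 protects each ring's txcmplq with a per-ring lock, whereas
	 * SLI3 uses the global hbalock; take whichever applies before
	 * splicing the queue onto the local completions list.
	 */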
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		if (phba->sli_rev >= LPFC_SLI_REV4)
			spin_lock_irq(&pring->ring_lock);
		else
			spin_lock_irq(&phba->hbalock);
		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;

		if (phba->sli_rev >= LPFC_SLI_REV4)
			spin_unlock_irq(&pring->ring_lock);
		else
			spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	pring = &psli->ring[LPFC_ELS_RING];

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);  /* required for lpfc_sgl_list and */
					/* scsi_buf_list */
	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	spin_lock(&pring->ring_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			 &phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&pring->ring_lock);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			 &aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);

	lpfc_sli4_free_sp_events(phba);
	return 0;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;
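	/*
	 * Post WORKER_HB_TMO at most once: only the caller that actually
	 * sets the bit wakes the worker thread, so back-to-back timer
	 * fires cannot double-wake it.
	 */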
	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodical operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	return;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * periodic event has already been attended to either in the interrupt handler
 * or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
 * has been a heart-beat mailbox command outstanding, the HBA shall be put
 * to offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			lpfc_rcv_seq_check_edtov(vports[i]);
			lpfc_fdmi_num_disc_check(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;
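	/*
	 * If any completion was seen within the last heart-beat interval
	 * the HBA is clearly alive: just rearm the timer (short interval
	 * when no heart-beat is outstanding, long timeout while one is)
	 * and skip issuing a new heart-beat.
	 */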
	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time +
			msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
			jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

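	/*
	 * Posted ELS buffers that have sat on phba->elsbuf for two
	 * consecutive heart-beat intervals (count unchanged since the
	 * previous pass) are considered abandoned and freed here.
	 */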
	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
							GFP_KERNEL);
				if (!pmboxq) {
					mod_timer(&phba->hb_tmofunc,
						 jiffies +
						 msecs_to_jiffies(1000 *
						 LPFC_HB_MBOX_INTERVAL));
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
						MBX_NOWAIT);

				if (retval != MBX_BUSY &&
					retval != MBX_SUCCESS) {
					mempool_free(pmboxq,
							phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						jiffies +
						msecs_to_jiffies(1000 *
						LPFC_HB_MBOX_INTERVAL));
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					" updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				 jiffies +
				 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still out"
					"standing: last compl time was %d ms.\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		}
	} else {
		mod_timer(&phba->hb_tmofunc,
			jiffies +
			msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_HBA_ERROR;
	spin_unlock_irq(&phba->hbalock);

	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_offline(phba);
	lpfc_hba_down_post(phba);
	lpfc_unblock_mgmt_io(phba);
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by HBA by setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);


	/*
	 * Firmware stops when it triggered erratt. That could cause the I/Os
	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
	 * SCSI layer retry it after re-establishing link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear. */
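	/*
	 * Poll HS_FFER1 every 100ms until the deferred error fully posts.
	 * A failed register read marks the adapter as unplugged, and an
	 * unload in progress abandons the wait so the worker thread can
	 * make progress.
	 */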
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * the first write to the host attention register clears the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggered erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error iocb (I/O) on txcmplq and let the SCSI layer
		 * retry it after re-establishing link.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 *
 * This routine is invoked to perform an SLI4 port PCI function reset in
 * response to port status register polling attention. It waits for port
 * status register (ERR, RDY, RN) bits before proceeding with function reset.
 * During this process, interrupt vectors are freed and later requested
 * for handling possible port resource change.
 **/
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
			    bool en_rn_msg)
{
	int rc;
	uint32_t intr_mode;

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_2) {
		/*
		 * On error status condition, driver needs to wait for port
		 * ready before performing reset.
		 */
		rc = lpfc_sli4_pdev_status_reg_wait(phba);
		if (rc)
			return rc;
	}

	/* need reset: attempt for port recovery */
	if (en_rn_msg)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2887 Reset Needed: Attempting Port "
				"Recovery...\n");
	lpfc_offline_prep(phba, mbx_action);
	lpfc_offline(phba);
	/* release interrupt for possible resource change */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli_brdrestart(phba);
	/* request and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3175 Failed to enable interrupt\n");
		return -EIO;
	}
	phba->intr_mode = intr_mode;
	rc = lpfc_online(phba);
	if (rc == 0)
		lpfc_unblock_mgmt_io(phba);

	return rc;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg = {0};
	uint32_t reg_err1, reg_err2;
	uint32_t uerrlo_reg, uemasklo_reg;
	uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
	bool en_rn_msg = true;
	struct temp_event temp_event_data;
	struct lpfc_register portsmphr_reg;
	int rc, i;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				&uerrlo_reg);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				&uemasklo_reg);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"7623 Checking UE recoverable");
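		/*
		 * Poll the port semaphore register once a second, for up to
		 * ue_to_sr / 1000 attempts, waiting for the firmware to
		 * declare the UE recoverable before resetting the port.
		 */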
		for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
			if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
				       &portsmphr_reg.word0))
				continue;

			smphr_port_status = bf_get(lpfc_port_smphr_port_status,
						   &portsmphr_reg);
			if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
			    LPFC_PORT_SEM_UE_RECOVERABLE)
				break;
			/* Sleep for 1Sec, before checking SEMAPHORE */
			msleep(1000);
		}

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"4827 smphr_port_status x%x : Waited %dSec",
				smphr_port_status, i);

		/* Recoverable UE, reset the HBA device */
		if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
		    LPFC_PORT_SEM_UE_RECOVERABLE) {
			for (i = 0; i < 20; i++) {
				msleep(1000);
				if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
				    &portsmphr_reg.word0) &&
				    (LPFC_POST_STAGE_PORT_READY ==
				     bf_get(lpfc_port_smphr_port_status,
					    &portsmphr_reg))) {
					rc = lpfc_sli4_port_sta_fn_reset(phba,
						LPFC_MBX_NO_WAIT, en_rn_msg);
					if (rc == 0)
						return;
					lpfc_printf_log(phba,
						KERN_ERR, LOG_INIT,
						"4215 Failed to recover UE");
					break;
				}
			}
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"7624 Firmware not ready: Failing UE recovery,"
				" waited %dSec", i);
		lpfc_sli4_offline_eratt(phba);
		break;

	case LPFC_SLI_INTF_IF_TYPE_2:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type2.STATUSregaddr,
				&portstat_reg.word0);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3151 PCI bus read access failure: x%x\n",
				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
			return;
		}
		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2889 Port Overtemperature event, "
				"taking port offline Data: x%x x%x\n",
				reg_err1, reg_err2);

			phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
			temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
			temp_event_data.event_code = LPFC_CRIT_TEMP;
			temp_event_data.data = 0xFFFFFFFF;

			shost = lpfc_shost_from_vport(phba->pport);
			fc_host_post_vendor_event(shost, fc_get_event_number(),
						  sizeof(temp_event_data),
						  (char *)&temp_event_data,
						  SCSI_NL_VID_TYPE_PCI
						  | PCI_VENDOR_ID_EMULEX);

			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			return;
		}
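		/*
		 * ERR1/ERR2 classify why the port went down: firmware
		 * update, forced debug dump or function provisioning. Only
		 * the firmware-update case suppresses the "Reset Needed"
		 * recovery message (en_rn_msg).
		 */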
SLIPORT_ERR2_REG_FW_RESTART) { 1760 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1761 "3143 Port Down: Firmware Update " 1762 "Detected\n"); 1763 en_rn_msg = false; 1764 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1765 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 1766 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1767 "3144 Port Down: Debug Dump\n"); 1768 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1769 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON) 1770 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1771 "3145 Port Down: Provisioning\n"); 1772 1773 /* If resets are disabled then leave the HBA alone and return */ 1774 if (!phba->cfg_enable_hba_reset) 1775 return; 1776 1777 /* Check port status register for function reset */ 1778 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, 1779 en_rn_msg); 1780 if (rc == 0) { 1781 /* don't report event on forced debug dump */ 1782 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1783 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 1784 return; 1785 else 1786 break; 1787 } 1788 /* fall through for not able to recover */ 1789 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1790 "3152 Unrecoverable error, bring the port " 1791 "offline\n"); 1792 lpfc_sli4_offline_eratt(phba); 1793 break; 1794 case LPFC_SLI_INTF_IF_TYPE_1: 1795 default: 1796 break; 1797 } 1798 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1799 "3123 Report dump event to upper layer\n"); 1800 /* Send an internal error event to mgmt application */ 1801 lpfc_board_errevt_to_mgmt(phba); 1802 1803 event_data = FC_REG_DUMP_EVENT; 1804 shost = lpfc_shost_from_vport(vport); 1805 fc_host_post_vendor_event(shost, fc_get_event_number(), 1806 sizeof(event_data), (char *) &event_data, 1807 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1808 } 1809 1810 /** 1811 * lpfc_handle_eratt - Wrapper func for handling hba error attention 1812 * @phba: pointer to lpfc HBA data structure. 1813 * 1814 * This routine wraps the actual SLI3 or SLI4 hba error attention handling 1815 * routine from the API jump table function pointer from the lpfc_hba struct. 1816 * 1817 * Return codes 1818 * 0 - success. 1819 * Any other value - error. 1820 **/ 1821 void 1822 lpfc_handle_eratt(struct lpfc_hba *phba) 1823 { 1824 (*phba->lpfc_handle_eratt)(phba); 1825 } 1826 1827 /** 1828 * lpfc_handle_latt - The HBA link event handler 1829 * @phba: pointer to lpfc hba data structure. 1830 * 1831 * This routine is invoked from the worker thread to handle a HBA host 1832 * attention link event. 
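 * It issues a READ_TOPOLOGY mailbox command to retrieve the new link
 * state; on any allocation or mailbox failure it takes the error path
 * below, which re-enables link attention interrupts and marks the HBA
 * as being in error.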
1833 **/ 1834 void 1835 lpfc_handle_latt(struct lpfc_hba *phba) 1836 { 1837 struct lpfc_vport *vport = phba->pport; 1838 struct lpfc_sli *psli = &phba->sli; 1839 LPFC_MBOXQ_t *pmb; 1840 volatile uint32_t control; 1841 struct lpfc_dmabuf *mp; 1842 int rc = 0; 1843 1844 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1845 if (!pmb) { 1846 rc = 1; 1847 goto lpfc_handle_latt_err_exit; 1848 } 1849 1850 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 1851 if (!mp) { 1852 rc = 2; 1853 goto lpfc_handle_latt_free_pmb; 1854 } 1855 1856 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 1857 if (!mp->virt) { 1858 rc = 3; 1859 goto lpfc_handle_latt_free_mp; 1860 } 1861 1862 /* Cleanup any outstanding ELS commands */ 1863 lpfc_els_flush_all_cmd(phba); 1864 1865 psli->slistat.link_event++; 1866 lpfc_read_topology(phba, pmb, mp); 1867 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 1868 pmb->vport = vport; 1869 /* Block ELS IOCBs until we have processed this mbox command */ 1870 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 1871 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 1872 if (rc == MBX_NOT_FINISHED) { 1873 rc = 4; 1874 goto lpfc_handle_latt_free_mbuf; 1875 } 1876 1877 /* Clear Link Attention in HA REG */ 1878 spin_lock_irq(&phba->hbalock); 1879 writel(HA_LATT, phba->HAregaddr); 1880 readl(phba->HAregaddr); /* flush */ 1881 spin_unlock_irq(&phba->hbalock); 1882 1883 return; 1884 1885 lpfc_handle_latt_free_mbuf: 1886 phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 1887 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1888 lpfc_handle_latt_free_mp: 1889 kfree(mp); 1890 lpfc_handle_latt_free_pmb: 1891 mempool_free(pmb, phba->mbox_mem_pool); 1892 lpfc_handle_latt_err_exit: 1893 /* Enable Link attention interrupts */ 1894 spin_lock_irq(&phba->hbalock); 1895 psli->sli_flag |= LPFC_PROCESS_LA; 1896 control = readl(phba->HCregaddr); 1897 control |= HC_LAINT_ENA; 1898 writel(control, phba->HCregaddr); 1899 readl(phba->HCregaddr); /* flush */ 1900 1901 /* Clear Link Attention in HA REG */ 1902 writel(HA_LATT, phba->HAregaddr); 1903 readl(phba->HAregaddr); /* flush */ 1904 spin_unlock_irq(&phba->hbalock); 1905 lpfc_linkdown(phba); 1906 phba->link_state = LPFC_HBA_ERROR; 1907 1908 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 1909 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 1910 1911 return; 1912 } 1913 1914 /** 1915 * lpfc_parse_vpd - Parse VPD (Vital Product Data) 1916 * @phba: pointer to lpfc hba data structure. 1917 * @vpd: pointer to the vital product data. 1918 * @len: length of the vital product data in bytes. 1919 * 1920 * This routine parses the Vital Product Data (VPD). The VPD is treated as 1921 * an array of characters. In this routine, the ModelName, ProgramType, and 1922 * ModelDesc, etc. fields of the phba data structure will be populated. 
1923 * 1924 * Return codes 1925 * 0 - pointer to the VPD passed in is NULL 1926 * 1 - success 1927 **/ 1928 int 1929 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 1930 { 1931 uint8_t lenlo, lenhi; 1932 int Length; 1933 int i, j; 1934 int finished = 0; 1935 int index = 0; 1936 1937 if (!vpd) 1938 return 0; 1939 1940 /* Vital Product */ 1941 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1942 "0455 Vital Product Data: x%x x%x x%x x%x\n", 1943 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 1944 (uint32_t) vpd[3]); 1945 while (!finished && (index < (len - 4))) { 1946 switch (vpd[index]) { 1947 case 0x82: 1948 case 0x91: 1949 index += 1; 1950 lenlo = vpd[index]; 1951 index += 1; 1952 lenhi = vpd[index]; 1953 index += 1; 1954 i = ((((unsigned short)lenhi) << 8) + lenlo); 1955 index += i; 1956 break; 1957 case 0x90: 1958 index += 1; 1959 lenlo = vpd[index]; 1960 index += 1; 1961 lenhi = vpd[index]; 1962 index += 1; 1963 Length = ((((unsigned short)lenhi) << 8) + lenlo); 1964 if (Length > len - index) 1965 Length = len - index; 1966 while (Length > 0) { 1967 /* Look for Serial Number */ 1968 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 1969 index += 2; 1970 i = vpd[index]; 1971 index += 1; 1972 j = 0; 1973 Length -= (3+i); 1974 while(i--) { 1975 phba->SerialNumber[j++] = vpd[index++]; 1976 if (j == 31) 1977 break; 1978 } 1979 phba->SerialNumber[j] = 0; 1980 continue; 1981 } 1982 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 1983 phba->vpd_flag |= VPD_MODEL_DESC; 1984 index += 2; 1985 i = vpd[index]; 1986 index += 1; 1987 j = 0; 1988 Length -= (3+i); 1989 while(i--) { 1990 phba->ModelDesc[j++] = vpd[index++]; 1991 if (j == 255) 1992 break; 1993 } 1994 phba->ModelDesc[j] = 0; 1995 continue; 1996 } 1997 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 1998 phba->vpd_flag |= VPD_MODEL_NAME; 1999 index += 2; 2000 i = vpd[index]; 2001 index += 1; 2002 j = 0; 2003 Length -= (3+i); 2004 while(i--) { 2005 phba->ModelName[j++] = vpd[index++]; 2006 if (j == 79) 2007 break; 2008 } 2009 phba->ModelName[j] = 0; 2010 continue; 2011 } 2012 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 2013 phba->vpd_flag |= VPD_PROGRAM_TYPE; 2014 index += 2; 2015 i = vpd[index]; 2016 index += 1; 2017 j = 0; 2018 Length -= (3+i); 2019 while(i--) { 2020 phba->ProgramType[j++] = vpd[index++]; 2021 if (j == 255) 2022 break; 2023 } 2024 phba->ProgramType[j] = 0; 2025 continue; 2026 } 2027 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 2028 phba->vpd_flag |= VPD_PORT; 2029 index += 2; 2030 i = vpd[index]; 2031 index += 1; 2032 j = 0; 2033 Length -= (3+i); 2034 while(i--) { 2035 if ((phba->sli_rev == LPFC_SLI_REV4) && 2036 (phba->sli4_hba.pport_name_sta == 2037 LPFC_SLI4_PPNAME_GET)) { 2038 j++; 2039 index++; 2040 } else 2041 phba->Port[j++] = vpd[index++]; 2042 if (j == 19) 2043 break; 2044 } 2045 if ((phba->sli_rev != LPFC_SLI_REV4) || 2046 (phba->sli4_hba.pport_name_sta == 2047 LPFC_SLI4_PPNAME_NON)) 2048 phba->Port[j] = 0; 2049 continue; 2050 } 2051 else { 2052 index += 2; 2053 i = vpd[index]; 2054 index += 1; 2055 index += i; 2056 Length -= (3 + i); 2057 } 2058 } 2059 finished = 0; 2060 break; 2061 case 0x78: 2062 finished = 1; 2063 break; 2064 default: 2065 index ++; 2066 break; 2067 } 2068 } 2069 2070 return(1); 2071 } 2072 2073 /** 2074 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description 2075 * @phba: pointer to lpfc hba data structure. 2076 * @mdp: pointer to the data structure to hold the derived model name. 
2077 * @descp: pointer to the data structure to hold the derived description. 2078 * 2079 * This routine retrieves HBA's description based on its registered PCI device 2080 * ID. The @descp passed into this function points to an array of 256 chars. It 2081 * shall be returned with the model name, maximum speed, and the host bus type. 2082 * The @mdp passed into this function points to an array of 80 chars. When the 2083 * function returns, the @mdp will be filled with the model name. 2084 **/ 2085 static void 2086 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 2087 { 2088 lpfc_vpd_t *vp; 2089 uint16_t dev_id = phba->pcidev->device; 2090 int max_speed; 2091 int GE = 0; 2092 int oneConnect = 0; /* default is not a oneConnect */ 2093 struct { 2094 char *name; 2095 char *bus; 2096 char *function; 2097 } m = {"<Unknown>", "", ""}; 2098 2099 if (mdp && mdp[0] != '\0' 2100 && descp && descp[0] != '\0') 2101 return; 2102 2103 if (phba->lmt & LMT_32Gb) 2104 max_speed = 32; 2105 else if (phba->lmt & LMT_16Gb) 2106 max_speed = 16; 2107 else if (phba->lmt & LMT_10Gb) 2108 max_speed = 10; 2109 else if (phba->lmt & LMT_8Gb) 2110 max_speed = 8; 2111 else if (phba->lmt & LMT_4Gb) 2112 max_speed = 4; 2113 else if (phba->lmt & LMT_2Gb) 2114 max_speed = 2; 2115 else if (phba->lmt & LMT_1Gb) 2116 max_speed = 1; 2117 else 2118 max_speed = 0; 2119 2120 vp = &phba->vpd; 2121 2122 switch (dev_id) { 2123 case PCI_DEVICE_ID_FIREFLY: 2124 m = (typeof(m)){"LP6000", "PCI", 2125 "Obsolete, Unsupported Fibre Channel Adapter"}; 2126 break; 2127 case PCI_DEVICE_ID_SUPERFLY: 2128 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 2129 m = (typeof(m)){"LP7000", "PCI", ""}; 2130 else 2131 m = (typeof(m)){"LP7000E", "PCI", ""}; 2132 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2133 break; 2134 case PCI_DEVICE_ID_DRAGONFLY: 2135 m = (typeof(m)){"LP8000", "PCI", 2136 "Obsolete, Unsupported Fibre Channel Adapter"}; 2137 break; 2138 case PCI_DEVICE_ID_CENTAUR: 2139 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 2140 m = (typeof(m)){"LP9002", "PCI", ""}; 2141 else 2142 m = (typeof(m)){"LP9000", "PCI", ""}; 2143 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2144 break; 2145 case PCI_DEVICE_ID_RFLY: 2146 m = (typeof(m)){"LP952", "PCI", 2147 "Obsolete, Unsupported Fibre Channel Adapter"}; 2148 break; 2149 case PCI_DEVICE_ID_PEGASUS: 2150 m = (typeof(m)){"LP9802", "PCI-X", 2151 "Obsolete, Unsupported Fibre Channel Adapter"}; 2152 break; 2153 case PCI_DEVICE_ID_THOR: 2154 m = (typeof(m)){"LP10000", "PCI-X", 2155 "Obsolete, Unsupported Fibre Channel Adapter"}; 2156 break; 2157 case PCI_DEVICE_ID_VIPER: 2158 m = (typeof(m)){"LPX1000", "PCI-X", 2159 "Obsolete, Unsupported Fibre Channel Adapter"}; 2160 break; 2161 case PCI_DEVICE_ID_PFLY: 2162 m = (typeof(m)){"LP982", "PCI-X", 2163 "Obsolete, Unsupported Fibre Channel Adapter"}; 2164 break; 2165 case PCI_DEVICE_ID_TFLY: 2166 m = (typeof(m)){"LP1050", "PCI-X", 2167 "Obsolete, Unsupported Fibre Channel Adapter"}; 2168 break; 2169 case PCI_DEVICE_ID_HELIOS: 2170 m = (typeof(m)){"LP11000", "PCI-X2", 2171 "Obsolete, Unsupported Fibre Channel Adapter"}; 2172 break; 2173 case PCI_DEVICE_ID_HELIOS_SCSP: 2174 m = (typeof(m)){"LP11000-SP", "PCI-X2", 2175 "Obsolete, Unsupported Fibre Channel Adapter"}; 2176 break; 2177 case PCI_DEVICE_ID_HELIOS_DCSP: 2178 m = (typeof(m)){"LP11002-SP", "PCI-X2", 2179 "Obsolete, Unsupported Fibre Channel Adapter"}; 2180 break; 2181 case PCI_DEVICE_ID_NEPTUNE: 2182 m = (typeof(m)){"LPe1000", "PCIe", 2183 
"Obsolete, Unsupported Fibre Channel Adapter"}; 2184 break; 2185 case PCI_DEVICE_ID_NEPTUNE_SCSP: 2186 m = (typeof(m)){"LPe1000-SP", "PCIe", 2187 "Obsolete, Unsupported Fibre Channel Adapter"}; 2188 break; 2189 case PCI_DEVICE_ID_NEPTUNE_DCSP: 2190 m = (typeof(m)){"LPe1002-SP", "PCIe", 2191 "Obsolete, Unsupported Fibre Channel Adapter"}; 2192 break; 2193 case PCI_DEVICE_ID_BMID: 2194 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 2195 break; 2196 case PCI_DEVICE_ID_BSMB: 2197 m = (typeof(m)){"LP111", "PCI-X2", 2198 "Obsolete, Unsupported Fibre Channel Adapter"}; 2199 break; 2200 case PCI_DEVICE_ID_ZEPHYR: 2201 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2202 break; 2203 case PCI_DEVICE_ID_ZEPHYR_SCSP: 2204 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2205 break; 2206 case PCI_DEVICE_ID_ZEPHYR_DCSP: 2207 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 2208 GE = 1; 2209 break; 2210 case PCI_DEVICE_ID_ZMID: 2211 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 2212 break; 2213 case PCI_DEVICE_ID_ZSMB: 2214 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 2215 break; 2216 case PCI_DEVICE_ID_LP101: 2217 m = (typeof(m)){"LP101", "PCI-X", 2218 "Obsolete, Unsupported Fibre Channel Adapter"}; 2219 break; 2220 case PCI_DEVICE_ID_LP10000S: 2221 m = (typeof(m)){"LP10000-S", "PCI", 2222 "Obsolete, Unsupported Fibre Channel Adapter"}; 2223 break; 2224 case PCI_DEVICE_ID_LP11000S: 2225 m = (typeof(m)){"LP11000-S", "PCI-X2", 2226 "Obsolete, Unsupported Fibre Channel Adapter"}; 2227 break; 2228 case PCI_DEVICE_ID_LPE11000S: 2229 m = (typeof(m)){"LPe11000-S", "PCIe", 2230 "Obsolete, Unsupported Fibre Channel Adapter"}; 2231 break; 2232 case PCI_DEVICE_ID_SAT: 2233 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 2234 break; 2235 case PCI_DEVICE_ID_SAT_MID: 2236 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 2237 break; 2238 case PCI_DEVICE_ID_SAT_SMB: 2239 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 2240 break; 2241 case PCI_DEVICE_ID_SAT_DCSP: 2242 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 2243 break; 2244 case PCI_DEVICE_ID_SAT_SCSP: 2245 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 2246 break; 2247 case PCI_DEVICE_ID_SAT_S: 2248 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 2249 break; 2250 case PCI_DEVICE_ID_HORNET: 2251 m = (typeof(m)){"LP21000", "PCIe", 2252 "Obsolete, Unsupported FCoE Adapter"}; 2253 GE = 1; 2254 break; 2255 case PCI_DEVICE_ID_PROTEUS_VF: 2256 m = (typeof(m)){"LPev12000", "PCIe IOV", 2257 "Obsolete, Unsupported Fibre Channel Adapter"}; 2258 break; 2259 case PCI_DEVICE_ID_PROTEUS_PF: 2260 m = (typeof(m)){"LPev12000", "PCIe IOV", 2261 "Obsolete, Unsupported Fibre Channel Adapter"}; 2262 break; 2263 case PCI_DEVICE_ID_PROTEUS_S: 2264 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 2265 "Obsolete, Unsupported Fibre Channel Adapter"}; 2266 break; 2267 case PCI_DEVICE_ID_TIGERSHARK: 2268 oneConnect = 1; 2269 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 2270 break; 2271 case PCI_DEVICE_ID_TOMCAT: 2272 oneConnect = 1; 2273 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 2274 break; 2275 case PCI_DEVICE_ID_FALCON: 2276 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 2277 "EmulexSecure Fibre"}; 2278 break; 2279 case PCI_DEVICE_ID_BALIUS: 2280 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 2281 "Obsolete, Unsupported Fibre Channel Adapter"}; 2282 break; 2283 case PCI_DEVICE_ID_LANCER_FC: 2284 m = (typeof(m)){"LPe16000", "PCIe", 
"Fibre Channel Adapter"}; 2285 break; 2286 case PCI_DEVICE_ID_LANCER_FC_VF: 2287 m = (typeof(m)){"LPe16000", "PCIe", 2288 "Obsolete, Unsupported Fibre Channel Adapter"}; 2289 break; 2290 case PCI_DEVICE_ID_LANCER_FCOE: 2291 oneConnect = 1; 2292 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; 2293 break; 2294 case PCI_DEVICE_ID_LANCER_FCOE_VF: 2295 oneConnect = 1; 2296 m = (typeof(m)){"OCe15100", "PCIe", 2297 "Obsolete, Unsupported FCoE"}; 2298 break; 2299 case PCI_DEVICE_ID_LANCER_G6_FC: 2300 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"}; 2301 break; 2302 case PCI_DEVICE_ID_SKYHAWK: 2303 case PCI_DEVICE_ID_SKYHAWK_VF: 2304 oneConnect = 1; 2305 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"}; 2306 break; 2307 default: 2308 m = (typeof(m)){"Unknown", "", ""}; 2309 break; 2310 } 2311 2312 if (mdp && mdp[0] == '\0') 2313 snprintf(mdp, 79,"%s", m.name); 2314 /* 2315 * oneConnect hba requires special processing, they are all initiators 2316 * and we put the port number on the end 2317 */ 2318 if (descp && descp[0] == '\0') { 2319 if (oneConnect) 2320 snprintf(descp, 255, 2321 "Emulex OneConnect %s, %s Initiator %s", 2322 m.name, m.function, 2323 phba->Port); 2324 else if (max_speed == 0) 2325 snprintf(descp, 255, 2326 "Emulex %s %s %s", 2327 m.name, m.bus, m.function); 2328 else 2329 snprintf(descp, 255, 2330 "Emulex %s %d%s %s %s", 2331 m.name, max_speed, (GE) ? "GE" : "Gb", 2332 m.bus, m.function); 2333 } 2334 } 2335 2336 /** 2337 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 2338 * @phba: pointer to lpfc hba data structure. 2339 * @pring: pointer to a IOCB ring. 2340 * @cnt: the number of IOCBs to be posted to the IOCB ring. 2341 * 2342 * This routine posts a given number of IOCBs with the associated DMA buffer 2343 * descriptors specified by the cnt argument to the given IOCB ring. 2344 * 2345 * Return codes 2346 * The number of IOCBs NOT able to be posted to the IOCB ring. 
2347 **/ 2348 int 2349 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 2350 { 2351 IOCB_t *icmd; 2352 struct lpfc_iocbq *iocb; 2353 struct lpfc_dmabuf *mp1, *mp2; 2354 2355 cnt += pring->missbufcnt; 2356 2357 /* While there are buffers to post */ 2358 while (cnt > 0) { 2359 /* Allocate buffer for command iocb */ 2360 iocb = lpfc_sli_get_iocbq(phba); 2361 if (iocb == NULL) { 2362 pring->missbufcnt = cnt; 2363 return cnt; 2364 } 2365 icmd = &iocb->iocb; 2366 2367 /* 2 buffers can be posted per command */ 2368 /* Allocate buffer to post */ 2369 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2370 if (mp1) 2371 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 2372 if (!mp1 || !mp1->virt) { 2373 kfree(mp1); 2374 lpfc_sli_release_iocbq(phba, iocb); 2375 pring->missbufcnt = cnt; 2376 return cnt; 2377 } 2378 2379 INIT_LIST_HEAD(&mp1->list); 2380 /* Allocate buffer to post */ 2381 if (cnt > 1) { 2382 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2383 if (mp2) 2384 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 2385 &mp2->phys); 2386 if (!mp2 || !mp2->virt) { 2387 kfree(mp2); 2388 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2389 kfree(mp1); 2390 lpfc_sli_release_iocbq(phba, iocb); 2391 pring->missbufcnt = cnt; 2392 return cnt; 2393 } 2394 2395 INIT_LIST_HEAD(&mp2->list); 2396 } else { 2397 mp2 = NULL; 2398 } 2399 2400 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 2401 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 2402 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 2403 icmd->ulpBdeCount = 1; 2404 cnt--; 2405 if (mp2) { 2406 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 2407 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 2408 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 2409 cnt--; 2410 icmd->ulpBdeCount = 2; 2411 } 2412 2413 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 2414 icmd->ulpLe = 1; 2415 2416 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 2417 IOCB_ERROR) { 2418 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2419 kfree(mp1); 2420 cnt++; 2421 if (mp2) { 2422 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 2423 kfree(mp2); 2424 cnt++; 2425 } 2426 lpfc_sli_release_iocbq(phba, iocb); 2427 pring->missbufcnt = cnt; 2428 return cnt; 2429 } 2430 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 2431 if (mp2) 2432 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 2433 } 2434 pring->missbufcnt = 0; 2435 return 0; 2436 } 2437 2438 /** 2439 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 2440 * @phba: pointer to lpfc hba data structure. 2441 * 2442 * This routine posts initial receive IOCB buffers to the ELS ring. The 2443 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 2444 * set to 64 IOCBs. 2445 * 2446 * Return codes 2447 * 0 - success (currently always success) 2448 **/ 2449 static int 2450 lpfc_post_rcv_buf(struct lpfc_hba *phba) 2451 { 2452 struct lpfc_sli *psli = &phba->sli; 2453 2454 /* Ring 0, ELS / CT buffers */ 2455 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0); 2456 /* Ring 2 - FCP no buffers needed */ 2457 2458 return 0; 2459 } 2460 2461 #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 2462 2463 /** 2464 * lpfc_sha_init - Set up initial array of hash table entries 2465 * @HashResultPointer: pointer to an array as hash table. 2466 * 2467 * This routine sets up the initial values to the array of hash table entries 2468 * for the LC HBAs. 
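 * The five constants below are the standard SHA-1 initial hash values
 * (H0-H4 from FIPS 180-1); lpfc_sha_iterate() then applies one plain
 * SHA-1 block transform over the 80-entry working array.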
2469 **/
2470 static void
2471 lpfc_sha_init(uint32_t * HashResultPointer)
2472 {
2473 HashResultPointer[0] = 0x67452301;
2474 HashResultPointer[1] = 0xEFCDAB89;
2475 HashResultPointer[2] = 0x98BADCFE;
2476 HashResultPointer[3] = 0x10325476;
2477 HashResultPointer[4] = 0xC3D2E1F0;
2478 }
2479
2480 /**
2481 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2482 * @HashResultPointer: pointer to an initial/result hash table.
2483 * @HashWorkingPointer: pointer to a working hash table.
2484 *
2485 * This routine iterates the initial hash table pointed to by
2486 * @HashResultPointer with the values from the working hash table pointed to
2487 * by @HashWorkingPointer. The results are put back into the initial hash
2488 * table and returned through @HashResultPointer as the result hash table.
2489 **/
2490 static void
2491 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2492 {
2493 int t;
2494 uint32_t TEMP;
2495 uint32_t A, B, C, D, E;
2496 t = 16;
2497 do {
2498 HashWorkingPointer[t] =
2499 S(1,
2500 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2501 8] ^
2502 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2503 } while (++t <= 79);
2504 t = 0;
2505 A = HashResultPointer[0];
2506 B = HashResultPointer[1];
2507 C = HashResultPointer[2];
2508 D = HashResultPointer[3];
2509 E = HashResultPointer[4];
2510
2511 do {
2512 if (t < 20) {
2513 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2514 } else if (t < 40) {
2515 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2516 } else if (t < 60) {
2517 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2518 } else {
2519 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2520 }
2521 TEMP += S(5, A) + E + HashWorkingPointer[t];
2522 E = D;
2523 D = C;
2524 C = S(30, B);
2525 B = A;
2526 A = TEMP;
2527 } while (++t <= 79);
2528
2529 HashResultPointer[0] += A;
2530 HashResultPointer[1] += B;
2531 HashResultPointer[2] += C;
2532 HashResultPointer[3] += D;
2533 HashResultPointer[4] += E;
2534
2535 }
2536
2537 /**
2538 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2539 * @RandomChallenge: pointer to the entry of host challenge random number array.
2540 * @HashWorking: pointer to the entry of the working hash array.
2541 *
2542 * This routine calculates the working hash array referred by @HashWorking
2543 * from the challenge random numbers associated with the host, referred by
2544 * @RandomChallenge. The result is put into the entry of the working hash
2545 * array and returned by reference through @HashWorking.
2546 **/
2547 static void
2548 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2549 {
2550 *HashWorking = (*RandomChallenge ^ *HashWorking);
2551 }
2552
2553 /**
2554 * lpfc_hba_init - Perform special handling for LC HBA initialization
2555 * @phba: pointer to lpfc hba data structure.
2556 * @hbainit: pointer to an array of unsigned 32-bit integers.
2557 *
2558 * This routine performs the special handling for LC HBA initialization.
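 * Roughly: an 80-word working array is seeded from the adapter WWNN,
 * XORed with the host RandomData challenge words, and run through a
 * single SHA-1 transform; the resulting digest is returned in @hbainit.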
2559 **/
2560 void
2561 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2562 {
2563 int t;
2564 uint32_t *HashWorking;
2565 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2566
2567 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2568 if (!HashWorking)
2569 return;
2570
2571 HashWorking[0] = HashWorking[78] = *pwwnn++;
2572 HashWorking[1] = HashWorking[79] = *pwwnn;
2573
2574 for (t = 0; t < 7; t++)
2575 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2576
2577 lpfc_sha_init(hbainit);
2578 lpfc_sha_iterate(hbainit, HashWorking);
2579 kfree(HashWorking);
2580 }
2581
2582 /**
2583 * lpfc_cleanup - Performs vport cleanups before deleting a vport
2584 * @vport: pointer to a virtual N_Port data structure.
2585 *
2586 * This routine performs the necessary cleanups before deleting the @vport.
2587 * It invokes the discovery state machine to perform necessary state
2588 * transitions and to release the ndlps associated with the @vport. Note,
2589 * the physical port is treated as @vport 0.
2590 **/
2591 void
2592 lpfc_cleanup(struct lpfc_vport *vport)
2593 {
2594 struct lpfc_hba *phba = vport->phba;
2595 struct lpfc_nodelist *ndlp, *next_ndlp;
2596 int i = 0;
2597
2598 if (phba->link_state > LPFC_LINK_DOWN)
2599 lpfc_port_link_failure(vport);
2600
2601 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2602 if (!NLP_CHK_NODE_ACT(ndlp)) {
2603 ndlp = lpfc_enable_node(vport, ndlp,
2604 NLP_STE_UNUSED_NODE);
2605 if (!ndlp)
2606 continue;
2607 spin_lock_irq(&phba->ndlp_lock);
2608 NLP_SET_FREE_REQ(ndlp);
2609 spin_unlock_irq(&phba->ndlp_lock);
2610 /* Trigger the release of the ndlp memory */
2611 lpfc_nlp_put(ndlp);
2612 continue;
2613 }
2614 spin_lock_irq(&phba->ndlp_lock);
2615 if (NLP_CHK_FREE_REQ(ndlp)) {
2616 /* The ndlp should not be in memory free mode already */
2617 spin_unlock_irq(&phba->ndlp_lock);
2618 continue;
2619 } else
2620 /* Indicate request for freeing ndlp memory */
2621 NLP_SET_FREE_REQ(ndlp);
2622 spin_unlock_irq(&phba->ndlp_lock);
2623
2624 if (vport->port_type != LPFC_PHYSICAL_PORT &&
2625 ndlp->nlp_DID == Fabric_DID) {
2626 /* Just free up ndlp with Fabric_DID for vports */
2627 lpfc_nlp_put(ndlp);
2628 continue;
2629 }
2630
2631 /* Take care of nodes in the unused state before the state
2632 * machine takes action.
2633 */
2634 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
2635 lpfc_nlp_put(ndlp);
2636 continue;
2637 }
2638
2639 if (ndlp->nlp_type & NLP_FABRIC)
2640 lpfc_disc_state_machine(vport, ndlp, NULL,
2641 NLP_EVT_DEVICE_RECOVERY);
2642
2643 lpfc_disc_state_machine(vport, ndlp, NULL,
2644 NLP_EVT_DEVICE_RM);
2645 }
2646
2647 /* At this point, ALL ndlp's should be gone
2648 * because of the previous NLP_EVT_DEVICE_RM.
2649 * Let's wait for this to happen, if needed.
2650 */
2651 while (!list_empty(&vport->fc_nodes)) {
2652 if (i++ > 3000) {
2653 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2654 "0233 Nodelist not empty\n");
2655 list_for_each_entry_safe(ndlp, next_ndlp,
2656 &vport->fc_nodes, nlp_listp) {
2657 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2658 LOG_NODE,
2659 "0282 did:x%x ndlp:x%p "
2660 "usgmap:x%x refcnt:%d\n",
2661 ndlp->nlp_DID, (void *)ndlp,
2662 ndlp->nlp_usg_map,
2663 kref_read(&ndlp->kref));
2664 }
2665 break;
2666 }
2667
2668 /* Wait for any activity on ndlps to settle */
2669 msleep(10);
2670 }
2671 lpfc_cleanup_vports_rrqs(vport, NULL);
2672 }
2673
2674 /**
2675 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2676 * @vport: pointer to a virtual N_Port data structure.
2677 *
2678 * This routine stops all the timers associated with a @vport. This function
2679 * is invoked before disabling or deleting a @vport. Note that the physical
2680 * port is treated as @vport 0.
2681 **/
2682 void
2683 lpfc_stop_vport_timers(struct lpfc_vport *vport)
2684 {
2685 del_timer_sync(&vport->els_tmofunc);
2686 del_timer_sync(&vport->delayed_disc_tmo);
2687 lpfc_can_disctmo(vport);
2688 return;
2689 }
2690
2691 /**
2692 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2693 * @phba: pointer to lpfc hba data structure.
2694 *
2695 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2696 * caller of this routine should already hold the host lock.
2697 **/
2698 void
2699 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2700 {
2701 /* Clear pending FCF rediscovery wait flag */
2702 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2703
2704 /* Now, try to stop the timer */
2705 del_timer(&phba->fcf.redisc_wait);
2706 }
2707
2708 /**
2709 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2710 * @phba: pointer to lpfc hba data structure.
2711 *
2712 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
2713 * checks whether the FCF rediscovery wait timer is pending with the host
2714 * lock held before proceeding with disabling the timer and clearing the
2715 * wait timer pending flag.
2716 **/
2717 void
2718 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2719 {
2720 spin_lock_irq(&phba->hbalock);
2721 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2722 /* FCF rediscovery timer already fired or stopped */
2723 spin_unlock_irq(&phba->hbalock);
2724 return;
2725 }
2726 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2727 /* Clear failover in progress flags */
2728 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
2729 spin_unlock_irq(&phba->hbalock);
2730 }
2731
2732 /**
2733 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2734 * @phba: pointer to lpfc hba data structure.
2735 *
2736 * This routine stops all the timers associated with an HBA. This function is
2737 * invoked before either putting an HBA offline or unloading the driver.
2738 **/
2739 void
2740 lpfc_stop_hba_timers(struct lpfc_hba *phba)
2741 {
2742 lpfc_stop_vport_timers(phba->pport);
2743 del_timer_sync(&phba->sli.mbox_tmo);
2744 del_timer_sync(&phba->fabric_block_timer);
2745 del_timer_sync(&phba->eratt_poll);
2746 del_timer_sync(&phba->hb_tmofunc);
2747 if (phba->sli_rev == LPFC_SLI_REV4) {
2748 del_timer_sync(&phba->rrq_tmr);
2749 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
2750 }
2751 phba->hb_outstanding = 0;
2752
2753 switch (phba->pci_dev_grp) {
2754 case LPFC_PCI_DEV_LP:
2755 /* Stop any LightPulse device specific driver timers */
2756 del_timer_sync(&phba->fcp_poll_timer);
2757 break;
2758 case LPFC_PCI_DEV_OC:
2759 /* Stop any OneConnect device specific driver timers */
2760 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2761 break;
2762 default:
2763 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2764 "0297 Invalid device group (x%x)\n",
2765 phba->pci_dev_grp);
2766 break;
2767 }
2768 return;
2769 }
2770
2771 /**
2772 * lpfc_block_mgmt_io - Mark an HBA's management interface as blocked
2773 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
2774 *
2775 * This routine marks an HBA's management interface as blocked. Once the HBA's
2776 * management interface is marked as blocked, all user space access to
2777 * the HBA, whether from the sysfs interface or the libdfc interface, is
2778 * blocked.
The HBA is set to block the management interface when the
2779 * driver prepares the HBA interface for online or offline.
2780 **/
2781 static void
2782 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
2783 {
2784 unsigned long iflag;
2785 uint8_t actcmd = MBX_HEARTBEAT;
2786 unsigned long timeout;
2787
2788 spin_lock_irqsave(&phba->hbalock, iflag);
2789 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2790 spin_unlock_irqrestore(&phba->hbalock, iflag);
2791 if (mbx_action == LPFC_MBX_NO_WAIT)
2792 return;
2793 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
2794 spin_lock_irqsave(&phba->hbalock, iflag);
2795 if (phba->sli.mbox_active) {
2796 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
2797 /* Determine how long we might wait for the active mailbox
2798 * command to be gracefully completed by firmware.
2799 */
2800 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
2801 phba->sli.mbox_active) * 1000) + jiffies;
2802 }
2803 spin_unlock_irqrestore(&phba->hbalock, iflag);
2804
2805 /* Wait for the outstanding mailbox command to complete */
2806 while (phba->sli.mbox_active) {
2807 /* Check active mailbox complete status every 2ms */
2808 msleep(2);
2809 if (time_after(jiffies, timeout)) {
2810 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2811 "2813 Mgmt IO is Blocked %x "
2812 "- mbox cmd %x still active\n",
2813 phba->sli.sli_flag, actcmd);
2814 break;
2815 }
2816 }
2817 }
2818
2819 /**
2820 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
2821 * @phba: pointer to lpfc hba data structure.
2822 *
2823 * Allocate RPIs for all active remote nodes. This is needed whenever
2824 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
2825 * is to fix up the temporary rpi assignments.
2826 **/
2827 void
2828 lpfc_sli4_node_prep(struct lpfc_hba *phba)
2829 {
2830 struct lpfc_nodelist *ndlp, *next_ndlp;
2831 struct lpfc_vport **vports;
2832 int i;
2833
2834 if (phba->sli_rev != LPFC_SLI_REV4)
2835 return;
2836
2837 vports = lpfc_create_vport_work_array(phba);
2838 if (vports != NULL) {
2839 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2840 if (vports[i]->load_flag & FC_UNLOADING)
2841 continue;
2842
2843 list_for_each_entry_safe(ndlp, next_ndlp,
2844 &vports[i]->fc_nodes,
2845 nlp_listp) {
2846 if (NLP_CHK_NODE_ACT(ndlp)) {
2847 ndlp->nlp_rpi =
2848 lpfc_sli4_alloc_rpi(phba);
2849 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
2850 LOG_NODE,
2851 "0009 rpi:%x DID:%x "
2852 "flg:%x map:%x %p\n",
2853 ndlp->nlp_rpi,
2854 ndlp->nlp_DID,
2855 ndlp->nlp_flag,
2856 ndlp->nlp_usg_map,
2857 ndlp);
2858 }
2859 }
2860 }
2861 }
2862 lpfc_destroy_vport_work_array(phba, vports);
2863 }
2864
2865 /**
2866 * lpfc_online - Initialize and bring an HBA online
2867 * @phba: pointer to lpfc hba data structure.
2868 *
2869 * This routine initializes the HBA and brings it online. During this
2870 * process, the management interface is blocked to prevent user space access
2871 * to the HBA interfering with the driver initialization.
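 * The management interface is unblocked again before returning, whether
 * the online attempt succeeds or fails.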
2872 * 2873 * Return codes 2874 * 0 - successful 2875 * 1 - failed 2876 **/ 2877 int 2878 lpfc_online(struct lpfc_hba *phba) 2879 { 2880 struct lpfc_vport *vport; 2881 struct lpfc_vport **vports; 2882 int i; 2883 bool vpis_cleared = false; 2884 2885 if (!phba) 2886 return 0; 2887 vport = phba->pport; 2888 2889 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 2890 return 0; 2891 2892 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2893 "0458 Bring Adapter online\n"); 2894 2895 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 2896 2897 if (!lpfc_sli_queue_setup(phba)) { 2898 lpfc_unblock_mgmt_io(phba); 2899 return 1; 2900 } 2901 2902 if (phba->sli_rev == LPFC_SLI_REV4) { 2903 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 2904 lpfc_unblock_mgmt_io(phba); 2905 return 1; 2906 } 2907 spin_lock_irq(&phba->hbalock); 2908 if (!phba->sli4_hba.max_cfg_param.vpi_used) 2909 vpis_cleared = true; 2910 spin_unlock_irq(&phba->hbalock); 2911 } else { 2912 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 2913 lpfc_unblock_mgmt_io(phba); 2914 return 1; 2915 } 2916 } 2917 2918 vports = lpfc_create_vport_work_array(phba); 2919 if (vports != NULL) { 2920 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2921 struct Scsi_Host *shost; 2922 shost = lpfc_shost_from_vport(vports[i]); 2923 spin_lock_irq(shost->host_lock); 2924 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 2925 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 2926 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2927 if (phba->sli_rev == LPFC_SLI_REV4) { 2928 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 2929 if ((vpis_cleared) && 2930 (vports[i]->port_type != 2931 LPFC_PHYSICAL_PORT)) 2932 vports[i]->vpi = 0; 2933 } 2934 spin_unlock_irq(shost->host_lock); 2935 } 2936 } 2937 lpfc_destroy_vport_work_array(phba, vports); 2938 2939 lpfc_unblock_mgmt_io(phba); 2940 return 0; 2941 } 2942 2943 /** 2944 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 2945 * @phba: pointer to lpfc hba data structure. 2946 * 2947 * This routine marks a HBA's management interface as not blocked. Once the 2948 * HBA's management interface is marked as not blocked, all the user space 2949 * access to the HBA, whether they are from sysfs interface or libdfc 2950 * interface will be allowed. The HBA is set to block the management interface 2951 * when the driver prepares the HBA interface for online or offline and then 2952 * set to unblock the management interface afterwards. 2953 **/ 2954 void 2955 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 2956 { 2957 unsigned long iflag; 2958 2959 spin_lock_irqsave(&phba->hbalock, iflag); 2960 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 2961 spin_unlock_irqrestore(&phba->hbalock, iflag); 2962 } 2963 2964 /** 2965 * lpfc_offline_prep - Prepare a HBA to be brought offline 2966 * @phba: pointer to lpfc hba data structure. 2967 * 2968 * This routine is invoked to prepare a HBA to be brought offline. It performs 2969 * unregistration login to all the nodes on all vports and flushes the mailbox 2970 * queue to make it ready to be brought offline. 
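 *
 * The @mbx_action flag selects how the mailbox subsystem is quiesced:
 * LPFC_MBX_WAIT waits (bounded by the mailbox timeout) for an active
 * mailbox command to complete gracefully, while LPFC_MBX_NO_WAIT does
 * not wait (see lpfc_block_mgmt_io() above).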
2971 **/ 2972 void 2973 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) 2974 { 2975 struct lpfc_vport *vport = phba->pport; 2976 struct lpfc_nodelist *ndlp, *next_ndlp; 2977 struct lpfc_vport **vports; 2978 struct Scsi_Host *shost; 2979 int i; 2980 2981 if (vport->fc_flag & FC_OFFLINE_MODE) 2982 return; 2983 2984 lpfc_block_mgmt_io(phba, mbx_action); 2985 2986 lpfc_linkdown(phba); 2987 2988 /* Issue an unreg_login to all nodes on all vports */ 2989 vports = lpfc_create_vport_work_array(phba); 2990 if (vports != NULL) { 2991 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2992 if (vports[i]->load_flag & FC_UNLOADING) 2993 continue; 2994 shost = lpfc_shost_from_vport(vports[i]); 2995 spin_lock_irq(shost->host_lock); 2996 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 2997 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2998 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 2999 spin_unlock_irq(shost->host_lock); 3000 3001 shost = lpfc_shost_from_vport(vports[i]); 3002 list_for_each_entry_safe(ndlp, next_ndlp, 3003 &vports[i]->fc_nodes, 3004 nlp_listp) { 3005 if (!NLP_CHK_NODE_ACT(ndlp)) 3006 continue; 3007 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 3008 continue; 3009 if (ndlp->nlp_type & NLP_FABRIC) { 3010 lpfc_disc_state_machine(vports[i], ndlp, 3011 NULL, NLP_EVT_DEVICE_RECOVERY); 3012 lpfc_disc_state_machine(vports[i], ndlp, 3013 NULL, NLP_EVT_DEVICE_RM); 3014 } 3015 spin_lock_irq(shost->host_lock); 3016 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 3017 spin_unlock_irq(shost->host_lock); 3018 /* 3019 * Whenever an SLI4 port goes offline, free the 3020 * RPI. Get a new RPI when the adapter port 3021 * comes back online. 3022 */ 3023 if (phba->sli_rev == LPFC_SLI_REV4) { 3024 lpfc_printf_vlog(ndlp->vport, 3025 KERN_INFO, LOG_NODE, 3026 "0011 lpfc_offline: " 3027 "ndlp:x%p did %x " 3028 "usgmap:x%x rpi:%x\n", 3029 ndlp, ndlp->nlp_DID, 3030 ndlp->nlp_usg_map, 3031 ndlp->nlp_rpi); 3032 3033 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 3034 } 3035 lpfc_unreg_rpi(vports[i], ndlp); 3036 } 3037 } 3038 } 3039 lpfc_destroy_vport_work_array(phba, vports); 3040 3041 lpfc_sli_mbox_sys_shutdown(phba, mbx_action); 3042 } 3043 3044 /** 3045 * lpfc_offline - Bring a HBA offline 3046 * @phba: pointer to lpfc hba data structure. 3047 * 3048 * This routine actually brings a HBA offline. It stops all the timers 3049 * associated with the HBA, brings down the SLI layer, and eventually 3050 * marks the HBA as in offline state for the upper layer protocol. 3051 **/ 3052 void 3053 lpfc_offline(struct lpfc_hba *phba) 3054 { 3055 struct Scsi_Host *shost; 3056 struct lpfc_vport **vports; 3057 int i; 3058 3059 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 3060 return; 3061 3062 /* stop port and all timers associated with this hba */ 3063 lpfc_stop_port(phba); 3064 vports = lpfc_create_vport_work_array(phba); 3065 if (vports != NULL) 3066 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 3067 lpfc_stop_vport_timers(vports[i]); 3068 lpfc_destroy_vport_work_array(phba, vports); 3069 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3070 "0460 Bring Adapter offline\n"); 3071 /* Bring down the SLI Layer and cleanup. The HBA is offline 3072 now. 
*/
3073 lpfc_sli_hba_down(phba);
3074 spin_lock_irq(&phba->hbalock);
3075 phba->work_ha = 0;
3076 spin_unlock_irq(&phba->hbalock);
3077 vports = lpfc_create_vport_work_array(phba);
3078 if (vports != NULL)
3079 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3080 shost = lpfc_shost_from_vport(vports[i]);
3081 spin_lock_irq(shost->host_lock);
3082 vports[i]->work_port_events = 0;
3083 vports[i]->fc_flag |= FC_OFFLINE_MODE;
3084 spin_unlock_irq(shost->host_lock);
3085 }
3086 lpfc_destroy_vport_work_array(phba, vports);
3087 }
3088
3089 /**
3090 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
3091 * @phba: pointer to lpfc hba data structure.
3092 *
3093 * This routine frees all the SCSI buffers and IOCBs held on the driver
3094 * lists back to the kernel. It is called from lpfc_pci_remove_one to free
3095 * the internal resources before the device is removed from the system.
3096 **/
3097 static void
3098 lpfc_scsi_free(struct lpfc_hba *phba)
3099 {
3100 struct lpfc_scsi_buf *sb, *sb_next;
3101 struct lpfc_iocbq *io, *io_next;
3102
3103 spin_lock_irq(&phba->hbalock);
3104
3105 /* Release all the lpfc_scsi_bufs maintained by this host. */
3106
3107 spin_lock(&phba->scsi_buf_list_put_lock);
3108 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3109 list) {
3110 list_del(&sb->list);
3111 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
3112 sb->dma_handle);
3113 kfree(sb);
3114 phba->total_scsi_bufs--;
3115 }
3116 spin_unlock(&phba->scsi_buf_list_put_lock);
3117
3118 spin_lock(&phba->scsi_buf_list_get_lock);
3119 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3120 list) {
3121 list_del(&sb->list);
3122 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
3123 sb->dma_handle);
3124 kfree(sb);
3125 phba->total_scsi_bufs--;
3126 }
3127 spin_unlock(&phba->scsi_buf_list_get_lock);
3128
3129 /* Release all the lpfc_iocbq entries maintained by this host. */
3130 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
3131 list_del(&io->list);
3132 kfree(io);
3133 phba->total_iocbq_bufs--;
3134 }
3135
3136 spin_unlock_irq(&phba->hbalock);
3137 }
3138
3139 /**
3140 * lpfc_sli4_xri_sgl_update - update xri-sgl sizing and mapping
3141 * @phba: pointer to lpfc hba data structure.
3142 *
3143 * This routine first calculates the sizes of the current els and allocated
3144 * scsi sgl lists, and then goes through all sgls to update the physical
3145 * XRIs assigned due to port function reset. During port initialization, the
3146 * current els and allocated scsi sgl lists are 0s.
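 * For example (illustrative numbers only): if a function reset grows the
 * ELS allocation from 64 to 128 XRIs, 64 additional sglq entries are
 * allocated and spliced onto lpfc_sgl_list, and every sgl on the list is
 * then stamped with a fresh XRI from lpfc_sli4_next_xritag().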
3147 *
3148 * Return codes
3149 * 0 - successful (for now, it always returns 0)
3150 **/
3151 int
3152 lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
3153 {
3154 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
3155 struct lpfc_scsi_buf *psb = NULL, *psb_next = NULL;
3156 uint16_t i, lxri, xri_cnt, els_xri_cnt, scsi_xri_cnt;
3157 LIST_HEAD(els_sgl_list);
3158 LIST_HEAD(scsi_sgl_list);
3159 int rc;
3160 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
3161
3162 /*
3163 * update on pci function's els xri-sgl list
3164 */
3165 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3166 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
3167 /* els xri-sgl expanded */
3168 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
3169 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3170 "3157 ELS xri-sgl count increased from "
3171 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3172 els_xri_cnt);
3173 /* allocate the additional els sgls */
3174 for (i = 0; i < xri_cnt; i++) {
3175 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
3176 GFP_KERNEL);
3177 if (sglq_entry == NULL) {
3178 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3179 "2562 Failure to allocate an "
3180 "ELS sgl entry:%d\n", i);
3181 rc = -ENOMEM;
3182 goto out_free_mem;
3183 }
3184 sglq_entry->buff_type = GEN_BUFF_TYPE;
3185 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
3186 &sglq_entry->phys);
3187 if (sglq_entry->virt == NULL) {
3188 kfree(sglq_entry);
3189 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3190 "2563 Failure to allocate an "
3191 "ELS mbuf:%d\n", i);
3192 rc = -ENOMEM;
3193 goto out_free_mem;
3194 }
3195 sglq_entry->sgl = sglq_entry->virt;
3196 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
3197 sglq_entry->state = SGL_FREED;
3198 list_add_tail(&sglq_entry->list, &els_sgl_list);
3199 }
3200 spin_lock_irq(&phba->hbalock);
3201 spin_lock(&pring->ring_lock);
3202 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
3203 spin_unlock(&pring->ring_lock);
3204 spin_unlock_irq(&phba->hbalock);
3205 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
3206 /* els xri-sgl shrunk */
3207 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
3208 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3209 "3158 ELS xri-sgl count decreased from "
3210 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3211 els_xri_cnt);
3212 spin_lock_irq(&phba->hbalock);
3213 spin_lock(&pring->ring_lock);
3214 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list);
3215 spin_unlock(&pring->ring_lock);
3216 spin_unlock_irq(&phba->hbalock);
3217 /* release extra els sgls from list */
3218 for (i = 0; i < xri_cnt; i++) {
3219 list_remove_head(&els_sgl_list,
3220 sglq_entry, struct lpfc_sglq, list);
3221 if (sglq_entry) {
3222 lpfc_mbuf_free(phba, sglq_entry->virt,
3223 sglq_entry->phys);
3224 kfree(sglq_entry);
3225 }
3226 }
3227 spin_lock_irq(&phba->hbalock);
3228 spin_lock(&pring->ring_lock);
3229 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
3230 spin_unlock(&pring->ring_lock);
3231 spin_unlock_irq(&phba->hbalock);
3232 } else
3233 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3234 "3163 ELS xri-sgl count unchanged: %d\n",
3235 els_xri_cnt);
3236 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
3237
3238 /* update the xris assigned to the els sgls on the list */
3239 sglq_entry = NULL;
3240 sglq_entry_next = NULL;
3241 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
3242 &phba->sli4_hba.lpfc_sgl_list, list) {
3243 lxri = lpfc_sli4_next_xritag(phba);
3244 if (lxri == NO_XRI) {
3245 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3246 "2400 Failed to allocate xri for "
3247
"ELS sgl\n"); 3248 rc = -ENOMEM; 3249 goto out_free_mem; 3250 } 3251 sglq_entry->sli4_lxritag = lxri; 3252 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3253 } 3254 3255 /* 3256 * update on pci function's allocated scsi xri-sgl list 3257 */ 3258 phba->total_scsi_bufs = 0; 3259 3260 /* maximum number of xris available for scsi buffers */ 3261 phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri - 3262 els_xri_cnt; 3263 3264 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3265 "2401 Current allocated SCSI xri-sgl count:%d, " 3266 "maximum SCSI xri count:%d\n", 3267 phba->sli4_hba.scsi_xri_cnt, 3268 phba->sli4_hba.scsi_xri_max); 3269 3270 spin_lock_irq(&phba->scsi_buf_list_get_lock); 3271 spin_lock(&phba->scsi_buf_list_put_lock); 3272 list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list); 3273 list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list); 3274 spin_unlock(&phba->scsi_buf_list_put_lock); 3275 spin_unlock_irq(&phba->scsi_buf_list_get_lock); 3276 3277 if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) { 3278 /* max scsi xri shrinked below the allocated scsi buffers */ 3279 scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt - 3280 phba->sli4_hba.scsi_xri_max; 3281 /* release the extra allocated scsi buffers */ 3282 for (i = 0; i < scsi_xri_cnt; i++) { 3283 list_remove_head(&scsi_sgl_list, psb, 3284 struct lpfc_scsi_buf, list); 3285 if (psb) { 3286 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, 3287 psb->data, psb->dma_handle); 3288 kfree(psb); 3289 } 3290 } 3291 spin_lock_irq(&phba->scsi_buf_list_get_lock); 3292 phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt; 3293 spin_unlock_irq(&phba->scsi_buf_list_get_lock); 3294 } 3295 3296 /* update xris associated to remaining allocated scsi buffers */ 3297 psb = NULL; 3298 psb_next = NULL; 3299 list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) { 3300 lxri = lpfc_sli4_next_xritag(phba); 3301 if (lxri == NO_XRI) { 3302 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3303 "2560 Failed to allocate xri for " 3304 "scsi buffer\n"); 3305 rc = -ENOMEM; 3306 goto out_free_mem; 3307 } 3308 psb->cur_iocbq.sli4_lxritag = lxri; 3309 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3310 } 3311 spin_lock_irq(&phba->scsi_buf_list_get_lock); 3312 spin_lock(&phba->scsi_buf_list_put_lock); 3313 list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get); 3314 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 3315 spin_unlock(&phba->scsi_buf_list_put_lock); 3316 spin_unlock_irq(&phba->scsi_buf_list_get_lock); 3317 3318 return 0; 3319 3320 out_free_mem: 3321 lpfc_free_els_sgl_list(phba); 3322 lpfc_scsi_free(phba); 3323 return rc; 3324 } 3325 3326 /** 3327 * lpfc_create_port - Create an FC port 3328 * @phba: pointer to lpfc hba data structure. 3329 * @instance: a unique integer ID to this FC port. 3330 * @dev: pointer to the device data structure. 3331 * 3332 * This routine creates a FC port for the upper layer protocol. The FC port 3333 * can be created on top of either a physical port or a virtual port provided 3334 * by the HBA. This routine also allocates a SCSI host data structure (shost) 3335 * and associates the FC port created before adding the shost into the SCSI 3336 * layer. 3337 * 3338 * Return codes 3339 * @vport - pointer to the virtual N_Port data structure. 3340 * NULL - port create failed. 
3341 **/ 3342 struct lpfc_vport * 3343 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 3344 { 3345 struct lpfc_vport *vport; 3346 struct Scsi_Host *shost; 3347 int error = 0; 3348 3349 if (dev != &phba->pcidev->dev) { 3350 shost = scsi_host_alloc(&lpfc_vport_template, 3351 sizeof(struct lpfc_vport)); 3352 } else { 3353 if (phba->sli_rev == LPFC_SLI_REV4) 3354 shost = scsi_host_alloc(&lpfc_template, 3355 sizeof(struct lpfc_vport)); 3356 else 3357 shost = scsi_host_alloc(&lpfc_template_s3, 3358 sizeof(struct lpfc_vport)); 3359 } 3360 if (!shost) 3361 goto out; 3362 3363 vport = (struct lpfc_vport *) shost->hostdata; 3364 vport->phba = phba; 3365 vport->load_flag |= FC_LOADING; 3366 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3367 vport->fc_rscn_flush = 0; 3368 3369 lpfc_get_vport_cfgparam(vport); 3370 shost->unique_id = instance; 3371 shost->max_id = LPFC_MAX_TARGET; 3372 shost->max_lun = vport->cfg_max_luns; 3373 shost->this_id = -1; 3374 shost->max_cmd_len = 16; 3375 shost->nr_hw_queues = phba->cfg_fcp_io_channel; 3376 if (phba->sli_rev == LPFC_SLI_REV4) { 3377 shost->dma_boundary = 3378 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 3379 shost->sg_tablesize = phba->cfg_sg_seg_cnt; 3380 } 3381 3382 /* 3383 * Set initial can_queue value since 0 is no longer supported and 3384 * scsi_add_host will fail. This will be adjusted later based on the 3385 * max xri value determined in hba setup. 3386 */ 3387 shost->can_queue = phba->cfg_hba_queue_depth - 10; 3388 if (dev != &phba->pcidev->dev) { 3389 shost->transportt = lpfc_vport_transport_template; 3390 vport->port_type = LPFC_NPIV_PORT; 3391 } else { 3392 shost->transportt = lpfc_transport_template; 3393 vport->port_type = LPFC_PHYSICAL_PORT; 3394 } 3395 3396 /* Initialize all internally managed lists. */ 3397 INIT_LIST_HEAD(&vport->fc_nodes); 3398 INIT_LIST_HEAD(&vport->rcv_buffer_list); 3399 spin_lock_init(&vport->work_port_lock); 3400 3401 init_timer(&vport->fc_disctmo); 3402 vport->fc_disctmo.function = lpfc_disc_timeout; 3403 vport->fc_disctmo.data = (unsigned long)vport; 3404 3405 init_timer(&vport->els_tmofunc); 3406 vport->els_tmofunc.function = lpfc_els_timeout; 3407 vport->els_tmofunc.data = (unsigned long)vport; 3408 3409 init_timer(&vport->delayed_disc_tmo); 3410 vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo; 3411 vport->delayed_disc_tmo.data = (unsigned long)vport; 3412 3413 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 3414 if (error) 3415 goto out_put_shost; 3416 3417 spin_lock_irq(&phba->hbalock); 3418 list_add_tail(&vport->listentry, &phba->port_list); 3419 spin_unlock_irq(&phba->hbalock); 3420 return vport; 3421 3422 out_put_shost: 3423 scsi_host_put(shost); 3424 out: 3425 return NULL; 3426 } 3427 3428 /** 3429 * destroy_port - destroy an FC port 3430 * @vport: pointer to an lpfc virtual N_Port data structure. 3431 * 3432 * This routine destroys a FC port from the upper layer protocol. All the 3433 * resources associated with the port are released. 
3434 **/
3435 void
3436 destroy_port(struct lpfc_vport *vport)
3437 {
3438 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3439 struct lpfc_hba *phba = vport->phba;
3440
3441 lpfc_debugfs_terminate(vport);
3442 fc_remove_host(shost);
3443 scsi_remove_host(shost);
3444
3445 spin_lock_irq(&phba->hbalock);
3446 list_del_init(&vport->listentry);
3447 spin_unlock_irq(&phba->hbalock);
3448
3449 lpfc_cleanup(vport);
3450 return;
3451 }
3452
3453 /**
3454 * lpfc_get_instance - Get a unique integer ID
3455 *
3456 * This routine allocates a unique integer ID from the lpfc_hba_index pool. It
3457 * uses the kernel idr facility to perform the task.
3458 *
3459 * Return codes:
3460 * instance - a unique integer ID allocated as the new instance.
3461 * -1 - lpfc get instance failed.
3462 **/
3463 int
3464 lpfc_get_instance(void)
3465 {
3466 int ret;
3467
3468 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
3469 return ret < 0 ? -1 : ret;
3470 }
3471
3472 /**
3473 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
3474 * @shost: pointer to SCSI host data structure.
3475 * @time: elapsed time of the scan in jiffies.
3476 *
3477 * This routine is called by the SCSI layer with a SCSI host to determine
3478 * whether the host scan has finished.
3479 *
3480 * Note: there is no scan_start function as adapter initialization will have
3481 * asynchronously kicked off the link initialization.
3482 *
3483 * Return codes
3484 * 0 - SCSI host scan is not over yet.
3485 * 1 - SCSI host scan is over.
3486 **/
3487 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
3488 {
3489 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3490 struct lpfc_hba *phba = vport->phba;
3491 int stat = 0;
3492
3493 spin_lock_irq(shost->host_lock);
3494
3495 if (vport->load_flag & FC_UNLOADING) {
3496 stat = 1;
3497 goto finished;
3498 }
3499 if (time >= msecs_to_jiffies(30 * 1000)) {
3500 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3501 "0461 Scanning longer than 30 "
3502 "seconds. Continuing initialization\n");
3503 stat = 1;
3504 goto finished;
3505 }
3506 if (time >= msecs_to_jiffies(15 * 1000) &&
3507 phba->link_state <= LPFC_LINK_DOWN) {
3508 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3509 "0465 Link down longer than 15 "
3510 "seconds. Continuing initialization\n");
3511 stat = 1;
3512 goto finished;
3513 }
3514
3515 if (vport->port_state != LPFC_VPORT_READY)
3516 goto finished;
3517 if (vport->num_disc_nodes || vport->fc_prli_sent)
3518 goto finished;
3519 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
3520 goto finished;
3521 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
3522 goto finished;
3523
3524 stat = 1;
3525
3526 finished:
3527 spin_unlock_irq(shost->host_lock);
3528 return stat;
3529 }
3530
3531 /**
3532 * lpfc_host_attrib_init - Initialize SCSI host attributes on an FC port
3533 * @shost: pointer to SCSI host data structure.
3534 *
3535 * This routine initializes the attributes of a given SCSI host on an FC
3536 * port. The SCSI host can be on top of either a physical or a virtual port.
3537 **/
3538 void lpfc_host_attrib_init(struct Scsi_Host *shost)
3539 {
3540 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3541 struct lpfc_hba *phba = vport->phba;
3542 /*
3543 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
3544 */ 3545 3546 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 3547 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 3548 fc_host_supported_classes(shost) = FC_COS_CLASS3; 3549 3550 memset(fc_host_supported_fc4s(shost), 0, 3551 sizeof(fc_host_supported_fc4s(shost))); 3552 fc_host_supported_fc4s(shost)[2] = 1; 3553 fc_host_supported_fc4s(shost)[7] = 1; 3554 3555 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 3556 sizeof fc_host_symbolic_name(shost)); 3557 3558 fc_host_supported_speeds(shost) = 0; 3559 if (phba->lmt & LMT_32Gb) 3560 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT; 3561 if (phba->lmt & LMT_16Gb) 3562 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; 3563 if (phba->lmt & LMT_10Gb) 3564 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 3565 if (phba->lmt & LMT_8Gb) 3566 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 3567 if (phba->lmt & LMT_4Gb) 3568 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 3569 if (phba->lmt & LMT_2Gb) 3570 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 3571 if (phba->lmt & LMT_1Gb) 3572 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 3573 3574 fc_host_maxframe_size(shost) = 3575 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 3576 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 3577 3578 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 3579 3580 /* This value is also unchanging */ 3581 memset(fc_host_active_fc4s(shost), 0, 3582 sizeof(fc_host_active_fc4s(shost))); 3583 fc_host_active_fc4s(shost)[2] = 1; 3584 fc_host_active_fc4s(shost)[7] = 1; 3585 3586 fc_host_max_npiv_vports(shost) = phba->max_vpi; 3587 spin_lock_irq(shost->host_lock); 3588 vport->load_flag &= ~FC_LOADING; 3589 spin_unlock_irq(shost->host_lock); 3590 } 3591 3592 /** 3593 * lpfc_stop_port_s3 - Stop SLI3 device port 3594 * @phba: pointer to lpfc hba data structure. 3595 * 3596 * This routine is invoked to stop an SLI3 device port, it stops the device 3597 * from generating interrupts and stops the device driver's timers for the 3598 * device. 3599 **/ 3600 static void 3601 lpfc_stop_port_s3(struct lpfc_hba *phba) 3602 { 3603 /* Clear all interrupt enable conditions */ 3604 writel(0, phba->HCregaddr); 3605 readl(phba->HCregaddr); /* flush */ 3606 /* Clear all pending interrupts */ 3607 writel(0xffffffff, phba->HAregaddr); 3608 readl(phba->HAregaddr); /* flush */ 3609 3610 /* Reset some HBA SLI setup states */ 3611 lpfc_stop_hba_timers(phba); 3612 phba->pport->work_port_events = 0; 3613 } 3614 3615 /** 3616 * lpfc_stop_port_s4 - Stop SLI4 device port 3617 * @phba: pointer to lpfc hba data structure. 3618 * 3619 * This routine is invoked to stop an SLI4 device port, it stops the device 3620 * from generating interrupts and stops the device driver's timers for the 3621 * device. 3622 **/ 3623 static void 3624 lpfc_stop_port_s4(struct lpfc_hba *phba) 3625 { 3626 /* Reset some HBA SLI4 setup states */ 3627 lpfc_stop_hba_timers(phba); 3628 phba->pport->work_port_events = 0; 3629 phba->sli4_hba.intr_enable = 0; 3630 } 3631 3632 /** 3633 * lpfc_stop_port - Wrapper function for stopping hba port 3634 * @phba: Pointer to HBA context object. 3635 * 3636 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from 3637 * the API jump table function pointer from the lpfc_hba struct. 
3638 **/ 3639 void 3640 lpfc_stop_port(struct lpfc_hba *phba) 3641 { 3642 phba->lpfc_stop_port(phba); 3643 } 3644 3645 /** 3646 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 3647 * @phba: Pointer to hba for which this call is being executed. 3648 * 3649 * This routine starts the timer waiting for the FCF rediscovery to complete. 3650 **/ 3651 void 3652 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba) 3653 { 3654 unsigned long fcf_redisc_wait_tmo = 3655 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO)); 3656 /* Start fcf rediscovery wait period timer */ 3657 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo); 3658 spin_lock_irq(&phba->hbalock); 3659 /* Allow action to new fcf asynchronous event */ 3660 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); 3661 /* Mark the FCF rediscovery pending state */ 3662 phba->fcf.fcf_flag |= FCF_REDISC_PEND; 3663 spin_unlock_irq(&phba->hbalock); 3664 } 3665 3666 /** 3667 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout 3668 * @ptr: unsigned long holding the pointer to the lpfc hba data structure. 3669 * 3670 * This routine is invoked when the wait for FCF table rediscovery has 3671 * timed out. If new FCF record(s) has (have) been discovered during the 3672 * wait period, a new FCF event shall be added to the FCOE async event 3673 * list, and then the worker thread shall be woken up for processing in the 3674 * worker thread context. 3675 **/ 3676 static void 3677 lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr) 3678 { 3679 struct lpfc_hba *phba = (struct lpfc_hba *)ptr; 3680 3681 /* Don't send FCF rediscovery event if timer cancelled */ 3682 spin_lock_irq(&phba->hbalock); 3683 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 3684 spin_unlock_irq(&phba->hbalock); 3685 return; 3686 } 3687 /* Clear FCF rediscovery timer pending flag */ 3688 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 3689 /* FCF rediscovery event to worker thread */ 3690 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 3691 spin_unlock_irq(&phba->hbalock); 3692 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 3693 "2776 FCF rediscover quiescent timer expired\n"); 3694 /* wake up worker thread */ 3695 lpfc_worker_wake_up(phba); 3696 } 3697 3698 /** 3699 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 3700 * @phba: pointer to lpfc hba data structure. 3701 * @acqe_link: pointer to the async link completion queue entry. 3702 * 3703 * This routine is to parse the SLI4 link-attention link fault code and 3704 * translate it into the base driver's read link attention mailbox command 3705 * status. 3706 * 3707 * Return: Link-attention status in terms of base driver's coding. 3708 **/ 3709 static uint16_t 3710 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 3711 struct lpfc_acqe_link *acqe_link) 3712 { 3713 uint16_t latt_fault; 3714 3715 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 3716 case LPFC_ASYNC_LINK_FAULT_NONE: 3717 case LPFC_ASYNC_LINK_FAULT_LOCAL: 3718 case LPFC_ASYNC_LINK_FAULT_REMOTE: 3719 latt_fault = 0; 3720 break; 3721 default: 3722 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3723 "0398 Invalid link fault code: x%x\n", 3724 bf_get(lpfc_acqe_link_fault, acqe_link)); 3725 latt_fault = MBXERR_ERROR; 3726 break; 3727 } 3728 return latt_fault; 3729 } 3730 3731 /** 3732 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 3733 * @phba: pointer to lpfc hba data structure. 3734 * @acqe_link: pointer to the async link completion queue entry.
3735 * 3736 * This routine is to parse the SLI4 link attention type and translate it 3737 * into the base driver's link attention type coding. 3738 * 3739 * Return: Link attention type in terms of base driver's coding. 3740 **/ 3741 static uint8_t 3742 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 3743 struct lpfc_acqe_link *acqe_link) 3744 { 3745 uint8_t att_type; 3746 3747 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 3748 case LPFC_ASYNC_LINK_STATUS_DOWN: 3749 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 3750 att_type = LPFC_ATT_LINK_DOWN; 3751 break; 3752 case LPFC_ASYNC_LINK_STATUS_UP: 3753 /* Ignore physical link up events - wait for logical link up */ 3754 att_type = LPFC_ATT_RESERVED; 3755 break; 3756 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 3757 att_type = LPFC_ATT_LINK_UP; 3758 break; 3759 default: 3760 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3761 "0399 Invalid link attention type: x%x\n", 3762 bf_get(lpfc_acqe_link_status, acqe_link)); 3763 att_type = LPFC_ATT_RESERVED; 3764 break; 3765 } 3766 return att_type; 3767 } 3768 3769 /** 3770 * lpfc_sli_port_speed_get - Get the current link speed of the port 3771 * @phba: pointer to lpfc hba data structure. 3772 * 3773 * This routine is to get the FC port's current link speed in Mbps. 3774 * 3775 * Return: link speed in terms of Mbps. 3776 **/ 3777 uint32_t 3778 lpfc_sli_port_speed_get(struct lpfc_hba *phba) 3779 { 3780 uint32_t link_speed; 3781 3782 if (!lpfc_is_link_up(phba)) 3783 return 0; 3784 3785 if (phba->sli_rev <= LPFC_SLI_REV3) { 3786 switch (phba->fc_linkspeed) { 3787 case LPFC_LINK_SPEED_1GHZ: 3788 link_speed = 1000; 3789 break; 3790 case LPFC_LINK_SPEED_2GHZ: 3791 link_speed = 2000; 3792 break; 3793 case LPFC_LINK_SPEED_4GHZ: 3794 link_speed = 4000; 3795 break; 3796 case LPFC_LINK_SPEED_8GHZ: 3797 link_speed = 8000; 3798 break; 3799 case LPFC_LINK_SPEED_10GHZ: 3800 link_speed = 10000; 3801 break; 3802 case LPFC_LINK_SPEED_16GHZ: 3803 link_speed = 16000; 3804 break; 3805 default: 3806 link_speed = 0; 3807 } 3808 } else { 3809 if (phba->sli4_hba.link_state.logical_speed) 3810 link_speed = 3811 phba->sli4_hba.link_state.logical_speed; 3812 else 3813 link_speed = phba->sli4_hba.link_state.speed; 3814 } 3815 return link_speed; 3816 } 3817 3818 /** 3819 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed 3820 * @phba: pointer to lpfc hba data structure. 3821 * @evt_code: asynchronous event code. 3822 * @speed_code: asynchronous event link speed code. 3823 * 3824 * This routine is to parse the given SLI4 async event link speed code into 3825 * a link speed value in Mbps. 3826 * 3827 * Return: link speed in terms of Mbps.
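 *
 * For example, an FC link attention event (evt_code LPFC_TRAILER_CODE_FC)
 * carrying speed_code LPFC_FC_LA_SPEED_16G parses to 16000 Mbps, while an
 * unrecognized speed code parses to 0.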
3828 **/ 3829 static uint32_t 3830 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, 3831 uint8_t speed_code) 3832 { 3833 uint32_t port_speed; 3834 3835 switch (evt_code) { 3836 case LPFC_TRAILER_CODE_LINK: 3837 switch (speed_code) { 3838 case LPFC_ASYNC_LINK_SPEED_ZERO: 3839 port_speed = 0; 3840 break; 3841 case LPFC_ASYNC_LINK_SPEED_10MBPS: 3842 port_speed = 10; 3843 break; 3844 case LPFC_ASYNC_LINK_SPEED_100MBPS: 3845 port_speed = 100; 3846 break; 3847 case LPFC_ASYNC_LINK_SPEED_1GBPS: 3848 port_speed = 1000; 3849 break; 3850 case LPFC_ASYNC_LINK_SPEED_10GBPS: 3851 port_speed = 10000; 3852 break; 3853 case LPFC_ASYNC_LINK_SPEED_20GBPS: 3854 port_speed = 20000; 3855 break; 3856 case LPFC_ASYNC_LINK_SPEED_25GBPS: 3857 port_speed = 25000; 3858 break; 3859 case LPFC_ASYNC_LINK_SPEED_40GBPS: 3860 port_speed = 40000; 3861 break; 3862 default: 3863 port_speed = 0; 3864 } 3865 break; 3866 case LPFC_TRAILER_CODE_FC: 3867 switch (speed_code) { 3868 case LPFC_FC_LA_SPEED_UNKNOWN: 3869 port_speed = 0; 3870 break; 3871 case LPFC_FC_LA_SPEED_1G: 3872 port_speed = 1000; 3873 break; 3874 case LPFC_FC_LA_SPEED_2G: 3875 port_speed = 2000; 3876 break; 3877 case LPFC_FC_LA_SPEED_4G: 3878 port_speed = 4000; 3879 break; 3880 case LPFC_FC_LA_SPEED_8G: 3881 port_speed = 8000; 3882 break; 3883 case LPFC_FC_LA_SPEED_10G: 3884 port_speed = 10000; 3885 break; 3886 case LPFC_FC_LA_SPEED_16G: 3887 port_speed = 16000; 3888 break; 3889 case LPFC_FC_LA_SPEED_32G: 3890 port_speed = 32000; 3891 break; 3892 default: 3893 port_speed = 0; 3894 } 3895 break; 3896 default: 3897 port_speed = 0; 3898 } 3899 return port_speed; 3900 } 3901 3902 /** 3903 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 3904 * @phba: pointer to lpfc hba data structure. 3905 * @acqe_link: pointer to the async link completion queue entry. 3906 * 3907 * This routine is to handle the SLI4 asynchronous FCoE link event. 
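 *
 * For FC mode ports this routine actually issues the READ_TOPOLOGY mailbox
 * command to fetch topology information; for FCoE mode ports it fills in
 * the mailbox fields from the ACQE and invokes the READ_TOPOLOGY completion
 * handler directly (see the end of this routine).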
3908 **/ 3909 static void 3910 lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 3911 struct lpfc_acqe_link *acqe_link) 3912 { 3913 struct lpfc_dmabuf *mp; 3914 LPFC_MBOXQ_t *pmb; 3915 MAILBOX_t *mb; 3916 struct lpfc_mbx_read_top *la; 3917 uint8_t att_type; 3918 int rc; 3919 3920 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 3921 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP) 3922 return; 3923 phba->fcoe_eventtag = acqe_link->event_tag; 3924 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3925 if (!pmb) { 3926 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3927 "0395 The mboxq allocation failed\n"); 3928 return; 3929 } 3930 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 3931 if (!mp) { 3932 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3933 "0396 The lpfc_dmabuf allocation failed\n"); 3934 goto out_free_pmb; 3935 } 3936 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 3937 if (!mp->virt) { 3938 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3939 "0397 The mbuf allocation failed\n"); 3940 goto out_free_dmabuf; 3941 } 3942 3943 /* Cleanup any outstanding ELS commands */ 3944 lpfc_els_flush_all_cmd(phba); 3945 3946 /* Block ELS IOCBs until we are done processing the link event */ 3947 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 3948 3949 /* Update link event statistics */ 3950 phba->sli.slistat.link_event++; 3951 3952 /* Create lpfc_handle_latt mailbox command from link ACQE */ 3953 lpfc_read_topology(phba, pmb, mp); 3954 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 3955 pmb->vport = phba->pport; 3956 3957 /* Keep the link status for extra SLI4 state machine reference */ 3958 phba->sli4_hba.link_state.speed = 3959 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK, 3960 bf_get(lpfc_acqe_link_speed, acqe_link)); 3961 phba->sli4_hba.link_state.duplex = 3962 bf_get(lpfc_acqe_link_duplex, acqe_link); 3963 phba->sli4_hba.link_state.status = 3964 bf_get(lpfc_acqe_link_status, acqe_link); 3965 phba->sli4_hba.link_state.type = 3966 bf_get(lpfc_acqe_link_type, acqe_link); 3967 phba->sli4_hba.link_state.number = 3968 bf_get(lpfc_acqe_link_number, acqe_link); 3969 phba->sli4_hba.link_state.fault = 3970 bf_get(lpfc_acqe_link_fault, acqe_link); 3971 phba->sli4_hba.link_state.logical_speed = 3972 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10; 3973 3974 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3975 "2900 Async FC/FCoE Link event - Speed:%dGBit " 3976 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " 3977 "Logical speed:%dMbps Fault:%d\n", 3978 phba->sli4_hba.link_state.speed, 3979 phba->sli4_hba.link_state.topology, 3980 phba->sli4_hba.link_state.status, 3981 phba->sli4_hba.link_state.type, 3982 phba->sli4_hba.link_state.number, 3983 phba->sli4_hba.link_state.logical_speed, 3984 phba->sli4_hba.link_state.fault); 3985 /* 3986 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch 3987 * topology info. Note: Optional for non FC-AL ports. 3988 */ 3989 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 3990 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 3991 if (rc == MBX_NOT_FINISHED) 3992 goto out_free_dmabuf; 3993 return; 3994 } 3995 /* 3996 * For FCoE Mode: fill in all the topology information we need and call 3997 * the READ_TOPOLOGY completion routine to continue without actually 3998 * sending the READ_TOPOLOGY mailbox command to the port.
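 * The faked mailbox never reaches the firmware; only its completion
 * handler runs, so the common link-attention path sees what looks like a
 * normal READ_TOPOLOGY completion.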
3999 */ 4000 /* Parse and translate status field */ 4001 mb = &pmb->u.mb; 4002 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link); 4003 4004 /* Parse and translate link attention fields */ 4005 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; 4006 la->eventTag = acqe_link->event_tag; 4007 bf_set(lpfc_mbx_read_top_att_type, la, att_type); 4008 bf_set(lpfc_mbx_read_top_link_spd, la, 4009 (bf_get(lpfc_acqe_link_speed, acqe_link))); 4010 4011 /* Fake the following irrelevant fields */ 4012 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT); 4013 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0); 4014 bf_set(lpfc_mbx_read_top_il, la, 0); 4015 bf_set(lpfc_mbx_read_top_pb, la, 0); 4016 bf_set(lpfc_mbx_read_top_fa, la, 0); 4017 bf_set(lpfc_mbx_read_top_mm, la, 0); 4018 4019 /* Invoke the lpfc_handle_latt mailbox command callback function */ 4020 lpfc_mbx_cmpl_read_topology(phba, pmb); 4021 4022 return; 4023 4024 out_free_dmabuf: 4025 kfree(mp); 4026 out_free_pmb: 4027 mempool_free(pmb, phba->mbox_mem_pool); 4028 } 4029 4030 /** 4031 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event 4032 * @phba: pointer to lpfc hba data structure. 4033 * @acqe_fc: pointer to the async fc completion queue entry. 4034 * 4035 * This routine is to handle the SLI4 asynchronous FC event. It will simply log 4036 * that the event was received and then issue a read_topology mailbox command so 4037 * that the rest of the driver will treat it the same as SLI3. 4038 **/ 4039 static void 4040 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) 4041 { 4042 struct lpfc_dmabuf *mp; 4043 LPFC_MBOXQ_t *pmb; 4044 MAILBOX_t *mb; 4045 struct lpfc_mbx_read_top *la; 4046 int rc; 4047 4048 if (bf_get(lpfc_trailer_type, acqe_fc) != 4049 LPFC_FC_LA_EVENT_TYPE_FC_LINK) { 4050 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4051 "2895 Non FC link Event detected.(%d)\n", 4052 bf_get(lpfc_trailer_type, acqe_fc)); 4053 return; 4054 } 4055 /* Keep the link status for extra SLI4 state machine reference */ 4056 phba->sli4_hba.link_state.speed = 4057 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 4058 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 4059 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; 4060 phba->sli4_hba.link_state.topology = 4061 bf_get(lpfc_acqe_fc_la_topology, acqe_fc); 4062 phba->sli4_hba.link_state.status = 4063 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc); 4064 phba->sli4_hba.link_state.type = 4065 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc); 4066 phba->sli4_hba.link_state.number = 4067 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc); 4068 phba->sli4_hba.link_state.fault = 4069 bf_get(lpfc_acqe_link_fault, acqe_fc); 4070 phba->sli4_hba.link_state.logical_speed = 4071 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 4072 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4073 "2896 Async FC event - Speed:%dGBaud Topology:x%x " 4074 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" 4075 "%dMbps Fault:%d\n", 4076 phba->sli4_hba.link_state.speed, 4077 phba->sli4_hba.link_state.topology, 4078 phba->sli4_hba.link_state.status, 4079 phba->sli4_hba.link_state.type, 4080 phba->sli4_hba.link_state.number, 4081 phba->sli4_hba.link_state.logical_speed, 4082 phba->sli4_hba.link_state.fault); 4083 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4084 if (!pmb) { 4085 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4086 "2897 The mboxq allocation failed\n"); 4087 return; 4088 } 4089 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4090 if (!mp) { 4091
lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4092 "2898 The lpfc_dmabuf allocation failed\n"); 4093 goto out_free_pmb; 4094 } 4095 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 4096 if (!mp->virt) { 4097 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4098 "2899 The mbuf allocation failed\n"); 4099 goto out_free_dmabuf; 4100 } 4101 4102 /* Cleanup any outstanding ELS commands */ 4103 lpfc_els_flush_all_cmd(phba); 4104 4105 /* Block ELS IOCBs until we are done processing the link event */ 4106 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 4107 4108 /* Update link event statistics */ 4109 phba->sli.slistat.link_event++; 4110 4111 /* Create lpfc_handle_latt mailbox command from link ACQE */ 4112 lpfc_read_topology(phba, pmb, mp); 4113 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 4114 pmb->vport = phba->pport; 4115 4116 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { 4117 /* Parse and translate status field */ 4118 mb = &pmb->u.mb; 4119 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, 4120 (void *)acqe_fc); 4121 4122 /* Parse and translate link attention fields */ 4123 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop; 4124 la->eventTag = acqe_fc->event_tag; 4125 bf_set(lpfc_mbx_read_top_att_type, la, 4126 LPFC_FC_LA_TYPE_LINK_DOWN); 4127 4128 /* Invoke the mailbox command callback function */ 4129 lpfc_mbx_cmpl_read_topology(phba, pmb); 4130 4131 return; 4132 } 4133 4134 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 4135 if (rc == MBX_NOT_FINISHED) 4136 goto out_free_dmabuf; 4137 return; 4138 4139 out_free_dmabuf: 4140 kfree(mp); 4141 out_free_pmb: 4142 mempool_free(pmb, phba->mbox_mem_pool); 4143 } 4144 4145 /** 4146 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI event 4147 * @phba: pointer to lpfc hba data structure. 4148 * @acqe_sli: pointer to the async SLI completion queue entry. 4149 * 4150 * This routine is to handle the SLI4 asynchronous SLI events.
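 *
 * Event subtypes handled below include over/normal temperature (forwarded
 * to the FC transport as vendor events), misconfigured optics, and remote
 * D_Port test initiation; anything else is only logged.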
4151 **/ 4152 static void 4153 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) 4154 { 4155 char port_name; 4156 char message[128]; 4157 uint8_t status; 4158 uint8_t evt_type; 4159 uint8_t operational = 0; 4160 struct temp_event temp_event_data; 4161 struct lpfc_acqe_misconfigured_event *misconfigured; 4162 struct Scsi_Host *shost; 4163 4164 evt_type = bf_get(lpfc_trailer_type, acqe_sli); 4165 4166 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4167 "2901 Async SLI event - Event Data1:x%08x Event Data2:" 4168 "x%08x SLI Event Type:%d\n", 4169 acqe_sli->event_data1, acqe_sli->event_data2, 4170 evt_type); 4171 4172 port_name = phba->Port[0]; 4173 if (port_name == 0x00) 4174 port_name = '?'; /* port name is empty */ 4175 4176 switch (evt_type) { 4177 case LPFC_SLI_EVENT_TYPE_OVER_TEMP: 4178 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 4179 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 4180 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 4181 4182 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 4183 "3190 Over Temperature:%d Celsius- Port Name %c\n", 4184 acqe_sli->event_data1, port_name); 4185 4186 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 4187 shost = lpfc_shost_from_vport(phba->pport); 4188 fc_host_post_vendor_event(shost, fc_get_event_number(), 4189 sizeof(temp_event_data), 4190 (char *)&temp_event_data, 4191 SCSI_NL_VID_TYPE_PCI 4192 | PCI_VENDOR_ID_EMULEX); 4193 break; 4194 case LPFC_SLI_EVENT_TYPE_NORM_TEMP: 4195 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 4196 temp_event_data.event_code = LPFC_NORMAL_TEMP; 4197 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 4198 4199 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4200 "3191 Normal Temperature:%d Celsius - Port Name %c\n", 4201 acqe_sli->event_data1, port_name); 4202 4203 shost = lpfc_shost_from_vport(phba->pport); 4204 fc_host_post_vendor_event(shost, fc_get_event_number(), 4205 sizeof(temp_event_data), 4206 (char *)&temp_event_data, 4207 SCSI_NL_VID_TYPE_PCI 4208 | PCI_VENDOR_ID_EMULEX); 4209 break; 4210 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED: 4211 misconfigured = (struct lpfc_acqe_misconfigured_event *) 4212 &acqe_sli->event_data1; 4213 4214 /* fetch the status for this port */ 4215 switch (phba->sli4_hba.lnk_info.lnk_no) { 4216 case LPFC_LINK_NUMBER_0: 4217 status = bf_get(lpfc_sli_misconfigured_port0_state, 4218 &misconfigured->theEvent); 4219 operational = bf_get(lpfc_sli_misconfigured_port0_op, 4220 &misconfigured->theEvent); 4221 break; 4222 case LPFC_LINK_NUMBER_1: 4223 status = bf_get(lpfc_sli_misconfigured_port1_state, 4224 &misconfigured->theEvent); 4225 operational = bf_get(lpfc_sli_misconfigured_port1_op, 4226 &misconfigured->theEvent); 4227 break; 4228 case LPFC_LINK_NUMBER_2: 4229 status = bf_get(lpfc_sli_misconfigured_port2_state, 4230 &misconfigured->theEvent); 4231 operational = bf_get(lpfc_sli_misconfigured_port2_op, 4232 &misconfigured->theEvent); 4233 break; 4234 case LPFC_LINK_NUMBER_3: 4235 status = bf_get(lpfc_sli_misconfigured_port3_state, 4236 &misconfigured->theEvent); 4237 operational = bf_get(lpfc_sli_misconfigured_port3_op, 4238 &misconfigured->theEvent); 4239 break; 4240 default: 4241 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4242 "3296 " 4243 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED " 4244 "event: Invalid link %d", 4245 phba->sli4_hba.lnk_info.lnk_no); 4246 return; 4247 } 4248 4249 /* Skip if optic state unchanged */ 4250 if (phba->sli4_hba.lnk_info.optic_state == status) 4251 return; 4252 4253 switch (status) { 4254 case
LPFC_SLI_EVENT_STATUS_VALID: 4255 sprintf(message, "Physical Link is functional"); 4256 break; 4257 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: 4258 sprintf(message, "Optics faulted/incorrectly " 4259 "installed/not installed - Reseat optics, " 4260 "if issue not resolved, replace."); 4261 break; 4262 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: 4263 sprintf(message, 4264 "Optics of two types installed - Remove one " 4265 "optic or install matching pair of optics."); 4266 break; 4267 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: 4268 sprintf(message, "Incompatible optics - Replace with " 4269 "compatible optics for card to function."); 4270 break; 4271 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED: 4272 sprintf(message, "Unqualified optics - Replace with " 4273 "Avago optics for Warranty and Technical " 4274 "Support - Link is%s operational", 4275 (operational) ? "" : " not"); 4276 break; 4277 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED: 4278 sprintf(message, "Uncertified optics - Replace with " 4279 "Avago-certified optics to enable link " 4280 "operation - Link is%s operational", 4281 (operational) ? "" : " not"); 4282 break; 4283 default: 4284 /* firmware is reporting a status we don't know about */ 4285 sprintf(message, "Unknown event status x%02x", status); 4286 break; 4287 } 4288 phba->sli4_hba.lnk_info.optic_state = status; 4289 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4290 "3176 Port Name %c %s\n", port_name, message); 4291 break; 4292 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT: 4293 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4294 "3192 Remote DPort Test Initiated - " 4295 "Event Data1:x%08x Event Data2: x%08x\n", 4296 acqe_sli->event_data1, acqe_sli->event_data2); 4297 break; 4298 default: 4299 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4300 "3193 Async SLI event - Event Data1:x%08x Event Data2:" 4301 "x%08x SLI Event Type:%d\n", 4302 acqe_sli->event_data1, acqe_sli->event_data2, 4303 evt_type); 4304 break; 4305 } 4306 } 4307 4308 /** 4309 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 4310 * @vport: pointer to vport data structure. 4311 * 4312 * This routine is to perform Clear Virtual Link (CVL) on a vport in 4313 * response to a CVL event. 4314 * 4315 * Return the pointer to the ndlp with the vport if successful, otherwise 4316 * return NULL. 
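 *
 * Callers such as the FCF dead and CVL handlers below treat a NULL return
 * as "no virtual link to act on" and skip further recovery for that vport.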
4317 **/ 4318 static struct lpfc_nodelist * 4319 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) 4320 { 4321 struct lpfc_nodelist *ndlp; 4322 struct Scsi_Host *shost; 4323 struct lpfc_hba *phba; 4324 4325 if (!vport) 4326 return NULL; 4327 phba = vport->phba; 4328 if (!phba) 4329 return NULL; 4330 ndlp = lpfc_findnode_did(vport, Fabric_DID); 4331 if (!ndlp) { 4332 /* Cannot find existing Fabric ndlp, so allocate a new one */ 4333 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 4334 if (!ndlp) 4335 return NULL; 4336 lpfc_nlp_init(vport, ndlp, Fabric_DID); 4337 /* Set the node type */ 4338 ndlp->nlp_type |= NLP_FABRIC; 4339 /* Put ndlp onto node list */ 4340 lpfc_enqueue_node(vport, ndlp); 4341 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 4342 /* re-setup ndlp without removing from node list */ 4343 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 4344 if (!ndlp) 4345 return NULL; 4346 } 4347 if ((phba->pport->port_state < LPFC_FLOGI) && 4348 (phba->pport->port_state != LPFC_VPORT_FAILED)) 4349 return NULL; 4350 /* If virtual link is not yet instantiated ignore CVL */ 4351 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) 4352 && (vport->port_state != LPFC_VPORT_FAILED)) 4353 return NULL; 4354 shost = lpfc_shost_from_vport(vport); 4355 if (!shost) 4356 return NULL; 4357 lpfc_linkdown_port(vport); 4358 lpfc_cleanup_pending_mbox(vport); 4359 spin_lock_irq(shost->host_lock); 4360 vport->fc_flag |= FC_VPORT_CVL_RCVD; 4361 spin_unlock_irq(shost->host_lock); 4362 4363 return ndlp; 4364 } 4365 4366 /** 4367 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports 4368 * @phba: pointer to lpfc hba data structure. 4369 * 4370 * This routine is to perform Clear Virtual Link (CVL) on all vports in 4371 * response to an FCF dead event. 4372 **/ 4373 static void 4374 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) 4375 { 4376 struct lpfc_vport **vports; 4377 int i; 4378 4379 vports = lpfc_create_vport_work_array(phba); 4380 if (vports) 4381 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 4382 lpfc_sli4_perform_vport_cvl(vports[i]); 4383 lpfc_destroy_vport_work_array(phba, vports); 4384 } 4385 4386 /** 4387 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event 4388 * @phba: pointer to lpfc hba data structure. 4389 * @acqe_fip: pointer to the async fcoe completion queue entry. 4390 * 4391 * This routine is to handle the SLI4 asynchronous fcoe event.
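 *
 * FIP event types dispatched below: new FCF or FCF parameter modified,
 * FCF table full, FCF dead, and Clear Virtual Link (CVL).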
4392 **/ 4393 static void 4394 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, 4395 struct lpfc_acqe_fip *acqe_fip) 4396 { 4397 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); 4398 int rc; 4399 struct lpfc_vport *vport; 4400 struct lpfc_nodelist *ndlp; 4401 struct Scsi_Host *shost; 4402 int active_vlink_present; 4403 struct lpfc_vport **vports; 4404 int i; 4405 4406 phba->fc_eventTag = acqe_fip->event_tag; 4407 phba->fcoe_eventtag = acqe_fip->event_tag; 4408 switch (event_type) { 4409 case LPFC_FIP_EVENT_TYPE_NEW_FCF: 4410 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: 4411 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) 4412 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 4413 LOG_DISCOVERY, 4414 "2546 New FCF event, evt_tag:x%x, " 4415 "index:x%x\n", 4416 acqe_fip->event_tag, 4417 acqe_fip->index); 4418 else 4419 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 4420 LOG_DISCOVERY, 4421 "2788 FCF param modified event, " 4422 "evt_tag:x%x, index:x%x\n", 4423 acqe_fip->event_tag, 4424 acqe_fip->index); 4425 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 4426 /* 4427 * During period of FCF discovery, read the FCF 4428 * table record indexed by the event to update 4429 * FCF roundrobin failover eligible FCF bmask. 4430 */ 4431 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 4432 LOG_DISCOVERY, 4433 "2779 Read FCF (x%x) for updating " 4434 "roundrobin FCF failover bmask\n", 4435 acqe_fip->index); 4436 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); 4437 } 4438 4439 /* If the FCF discovery is in progress, do nothing. */ 4440 spin_lock_irq(&phba->hbalock); 4441 if (phba->hba_flag & FCF_TS_INPROG) { 4442 spin_unlock_irq(&phba->hbalock); 4443 break; 4444 } 4445 /* If fast FCF failover rescan event is pending, do nothing */ 4446 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { 4447 spin_unlock_irq(&phba->hbalock); 4448 break; 4449 } 4450 4451 /* If the FCF has been in discovered state, do nothing. */ 4452 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 4453 spin_unlock_irq(&phba->hbalock); 4454 break; 4455 } 4456 spin_unlock_irq(&phba->hbalock); 4457 4458 /* Otherwise, scan the entire FCF table and re-discover SAN */ 4459 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 4460 "2770 Start FCF table scan per async FCF " 4461 "event, evt_tag:x%x, index:x%x\n", 4462 acqe_fip->event_tag, acqe_fip->index); 4463 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 4464 LPFC_FCOE_FCF_GET_FIRST); 4465 if (rc) 4466 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 4467 "2547 Issue FCF scan read FCF mailbox " 4468 "command failed (x%x)\n", rc); 4469 break; 4470 4471 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: 4472 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4473 "2548 FCF Table full count 0x%x tag 0x%x\n", 4474 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), 4475 acqe_fip->event_tag); 4476 break; 4477 4478 case LPFC_FIP_EVENT_TYPE_FCF_DEAD: 4479 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 4480 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 4481 "2549 FCF (x%x) disconnected from network, " 4482 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag); 4483 /* 4484 * If we are in the middle of FCF failover process, clear 4485 * the corresponding FCF bit in the roundrobin bitmap. 
4486 */ 4487 spin_lock_irq(&phba->hbalock); 4488 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && 4489 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) { 4490 spin_unlock_irq(&phba->hbalock); 4491 /* Update FLOGI FCF failover eligible FCF bmask */ 4492 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); 4493 break; 4494 } 4495 spin_unlock_irq(&phba->hbalock); 4496 4497 /* If the event is not for currently used fcf do nothing */ 4498 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index) 4499 break; 4500 4501 /* 4502 * Otherwise, request the port to rediscover the entire FCF 4503 * table for a fast recovery in case the current FCF is no 4504 * longer valid, as we are not already in the middle of an 4505 * FCF failover process. 4506 */ 4507 spin_lock_irq(&phba->hbalock); 4508 /* Mark the fast failover process in progress */ 4509 phba->fcf.fcf_flag |= FCF_DEAD_DISC; 4510 spin_unlock_irq(&phba->hbalock); 4511 4512 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 4513 "2771 Start FCF fast failover process due to " 4514 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " 4515 "\n", acqe_fip->event_tag, acqe_fip->index); 4516 rc = lpfc_sli4_redisc_fcf_table(phba); 4517 if (rc) { 4518 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 4519 LOG_DISCOVERY, 4520 "2772 Issue FCF rediscover mailbox " 4521 "command failed, fail through to FCF " 4522 "dead event\n"); 4523 spin_lock_irq(&phba->hbalock); 4524 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 4525 spin_unlock_irq(&phba->hbalock); 4526 /* 4527 * Last resort will fail over by treating this 4528 * as a link down to FCF registration. 4529 */ 4530 lpfc_sli4_fcf_dead_failthrough(phba); 4531 } else { 4532 /* Reset FCF roundrobin bmask for new discovery */ 4533 lpfc_sli4_clear_fcf_rr_bmask(phba); 4534 /* 4535 * Handling fast FCF failover to a DEAD FCF event is 4536 * considered equivalent to receiving CVL on all vports. 4537 */ 4538 lpfc_sli4_perform_all_vport_cvl(phba); 4539 } 4540 break; 4541 case LPFC_FIP_EVENT_TYPE_CVL: 4542 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 4543 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 4544 "2718 Clear Virtual Link Received for VPI 0x%x" 4545 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 4546 4547 vport = lpfc_find_vport_by_vpid(phba, 4548 acqe_fip->index); 4549 ndlp = lpfc_sli4_perform_vport_cvl(vport); 4550 if (!ndlp) 4551 break; 4552 active_vlink_present = 0; 4553 4554 vports = lpfc_create_vport_work_array(phba); 4555 if (vports) { 4556 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 4557 i++) { 4558 if ((!(vports[i]->fc_flag & 4559 FC_VPORT_CVL_RCVD)) && 4560 (vports[i]->port_state > LPFC_FDISC)) { 4561 active_vlink_present = 1; 4562 break; 4563 } 4564 } 4565 lpfc_destroy_vport_work_array(phba, vports); 4566 } 4567 4568 /* 4569 * Don't re-instantiate if vport is marked for deletion. 4570 * If we are here first then vport_delete is going to wait 4571 * for discovery to complete. 4572 */ 4573 if (!(vport->load_flag & FC_UNLOADING) && 4574 active_vlink_present) { 4575 /* 4576 * If there are other active VLinks present, 4577 * re-instantiate the Vlink using FDISC.
4578 */ 4579 mod_timer(&ndlp->nlp_delayfunc, 4580 jiffies + msecs_to_jiffies(1000)); 4581 shost = lpfc_shost_from_vport(vport); 4582 spin_lock_irq(shost->host_lock); 4583 ndlp->nlp_flag |= NLP_DELAY_TMO; 4584 spin_unlock_irq(shost->host_lock); 4585 ndlp->nlp_last_elscmd = ELS_CMD_FDISC; 4586 vport->port_state = LPFC_FDISC; 4587 } else { 4588 /* 4589 * Otherwise, we request the port to rediscover 4590 * the entire FCF table for a fast recovery, in 4591 * case the current FCF is no longer valid, if 4592 * we are not already in the FCF failover 4593 * process. 4594 */ 4595 spin_lock_irq(&phba->hbalock); 4596 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 4597 spin_unlock_irq(&phba->hbalock); 4598 break; 4599 } 4600 /* Mark the fast failover process in progress */ 4601 phba->fcf.fcf_flag |= FCF_ACVL_DISC; 4602 spin_unlock_irq(&phba->hbalock); 4603 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 4604 LOG_DISCOVERY, 4605 "2773 Start FCF failover per CVL, " 4606 "evt_tag:x%x\n", acqe_fip->event_tag); 4607 rc = lpfc_sli4_redisc_fcf_table(phba); 4608 if (rc) { 4609 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 4610 LOG_DISCOVERY, 4611 "2774 Issue FCF rediscover " 4612 "mailbox command failed, fail " 4613 "through to CVL event\n"); 4614 spin_lock_irq(&phba->hbalock); 4615 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 4616 spin_unlock_irq(&phba->hbalock); 4617 /* 4618 * Last resort will be to retry on the 4619 * currently registered FCF entry. 4620 */ 4621 lpfc_retry_pport_discovery(phba); 4622 } else 4623 /* 4624 * Reset FCF roundrobin bmask for new 4625 * discovery. 4626 */ 4627 lpfc_sli4_clear_fcf_rr_bmask(phba); 4628 } 4629 break; 4630 default: 4631 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4632 "0288 Unknown FCoE event type 0x%x event tag " 4633 "0x%x\n", event_type, acqe_fip->event_tag); 4634 break; 4635 } 4636 } 4637 4638 /** 4639 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event 4640 * @phba: pointer to lpfc hba data structure. 4641 * @acqe_dcbx: pointer to the async dcbx completion queue entry. 4642 * 4643 * This routine is to handle the SLI4 asynchronous dcbx event. 4644 **/ 4645 static void 4646 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, 4647 struct lpfc_acqe_dcbx *acqe_dcbx) 4648 { 4649 phba->fc_eventTag = acqe_dcbx->event_tag; 4650 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4651 "0290 The SLI4 DCBX asynchronous event is not " 4652 "handled yet\n"); 4653 } 4654 4655 /** 4656 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event 4657 * @phba: pointer to lpfc hba data structure. 4658 * @acqe_grp5: pointer to the async grp5 completion queue entry. 4659 * 4660 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event 4661 * is an asynchronous notification of a logical link speed change. The Port 4662 * reports the logical link speed in units of 10Mbps.
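 *
 * For example, a reported grp5 value of 100 is stored as 100 * 10 =
 * 1000 Mbps.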
4663 **/ 4664 static void 4665 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, 4666 struct lpfc_acqe_grp5 *acqe_grp5) 4667 { 4668 uint16_t prev_ll_spd; 4669 4670 phba->fc_eventTag = acqe_grp5->event_tag; 4671 phba->fcoe_eventtag = acqe_grp5->event_tag; 4672 prev_ll_spd = phba->sli4_hba.link_state.logical_speed; 4673 phba->sli4_hba.link_state.logical_speed = 4674 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10; 4675 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4676 "2789 GRP5 Async Event: Updating logical link speed " 4677 "from %dMbps to %dMbps\n", prev_ll_spd, 4678 phba->sli4_hba.link_state.logical_speed); 4679 } 4680 4681 /** 4682 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events 4683 * @phba: pointer to lpfc hba data structure. 4684 * 4685 * This routine is invoked by the worker thread to process all the pending 4686 * SLI4 asynchronous events. 4687 **/ 4688 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) 4689 { 4690 struct lpfc_cq_event *cq_event; 4691 4692 /* First, declare the async event has been handled */ 4693 spin_lock_irq(&phba->hbalock); 4694 phba->hba_flag &= ~ASYNC_EVENT; 4695 spin_unlock_irq(&phba->hbalock); 4696 /* Now, handle all the async events */ 4697 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { 4698 /* Get the first event from the head of the event queue */ 4699 spin_lock_irq(&phba->hbalock); 4700 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, 4701 cq_event, struct lpfc_cq_event, list); 4702 spin_unlock_irq(&phba->hbalock); 4703 /* Process the asynchronous event */ 4704 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { 4705 case LPFC_TRAILER_CODE_LINK: 4706 lpfc_sli4_async_link_evt(phba, 4707 &cq_event->cqe.acqe_link); 4708 break; 4709 case LPFC_TRAILER_CODE_FCOE: 4710 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip); 4711 break; 4712 case LPFC_TRAILER_CODE_DCBX: 4713 lpfc_sli4_async_dcbx_evt(phba, 4714 &cq_event->cqe.acqe_dcbx); 4715 break; 4716 case LPFC_TRAILER_CODE_GRP5: 4717 lpfc_sli4_async_grp5_evt(phba, 4718 &cq_event->cqe.acqe_grp5); 4719 break; 4720 case LPFC_TRAILER_CODE_FC: 4721 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc); 4722 break; 4723 case LPFC_TRAILER_CODE_SLI: 4724 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); 4725 break; 4726 default: 4727 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4728 "1804 Invalid asynchronous event code: " 4729 "x%x\n", bf_get(lpfc_trailer_code, 4730 &cq_event->cqe.mcqe_cmpl)); 4731 break; 4732 } 4733 /* Free the completion event processed to the free pool */ 4734 lpfc_sli4_cq_event_release(phba, cq_event); 4735 } 4736 } 4737 4738 /** 4739 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event 4740 * @phba: pointer to lpfc hba data structure. 4741 * 4742 * This routine is invoked by the worker thread to process the FCF table 4743 * rediscovery pending completion event.
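 *
 * Flow summary: lpfc_fcf_redisc_wait_start_timer() arms the wait timer,
 * lpfc_sli4_fcf_redisc_wait_tmo() turns its expiry into an FCF_REDISC_EVT
 * flag and wakes the worker thread, which ends up here to restart the FCF
 * table scan from the first entry.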
4744 **/ 4745 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) 4746 { 4747 int rc; 4748 4749 spin_lock_irq(&phba->hbalock); 4750 /* Clear FCF rediscovery timeout event */ 4751 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; 4752 /* Clear driver fast failover FCF record flag */ 4753 phba->fcf.failover_rec.flag = 0; 4754 /* Set state for FCF fast failover */ 4755 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 4756 spin_unlock_irq(&phba->hbalock); 4757 4758 /* Scan FCF table from the first entry to re-discover SAN */ 4759 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 4760 "2777 Start post-quiescent FCF table scan\n"); 4761 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 4762 if (rc) 4763 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 4764 "2747 Issue FCF scan read FCF mailbox " 4765 "command failed 0x%x\n", rc); 4766 } 4767 4768 /** 4769 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 4770 * @phba: pointer to lpfc hba data structure. 4771 * @dev_grp: The HBA PCI-Device group number. 4772 * 4773 * This routine is invoked to set up the per HBA PCI-Device group function 4774 * API jump table entries. 4775 * 4776 * Return: 0 if success, otherwise -ENODEV 4777 **/ 4778 int 4779 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 4780 { 4781 int rc; 4782 4783 /* Set up lpfc PCI-device group */ 4784 phba->pci_dev_grp = dev_grp; 4785 4786 /* The LPFC_PCI_DEV_OC uses SLI4 */ 4787 if (dev_grp == LPFC_PCI_DEV_OC) 4788 phba->sli_rev = LPFC_SLI_REV4; 4789 4790 /* Set up device INIT API function jump table */ 4791 rc = lpfc_init_api_table_setup(phba, dev_grp); 4792 if (rc) 4793 return -ENODEV; 4794 /* Set up SCSI API function jump table */ 4795 rc = lpfc_scsi_api_table_setup(phba, dev_grp); 4796 if (rc) 4797 return -ENODEV; 4798 /* Set up SLI API function jump table */ 4799 rc = lpfc_sli_api_table_setup(phba, dev_grp); 4800 if (rc) 4801 return -ENODEV; 4802 /* Set up MBOX API function jump table */ 4803 rc = lpfc_mbox_api_table_setup(phba, dev_grp); 4804 if (rc) 4805 return -ENODEV; 4806 4807 return 0; 4808 } 4809 4810 /** 4811 * lpfc_log_intr_mode - Log the active interrupt mode 4812 * @phba: pointer to lpfc hba data structure. 4813 * @intr_mode: active interrupt mode adopted. 4814 * 4815 * This routine is invoked to log the interrupt mode currently in use by 4816 * the device. 4817 **/ 4818 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 4819 { 4820 switch (intr_mode) { 4821 case 0: 4822 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4823 "0470 Enabled INTx interrupt mode.\n"); 4824 break; 4825 case 1: 4826 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4827 "0481 Enabled MSI interrupt mode.\n"); 4828 break; 4829 case 2: 4830 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4831 "0480 Enabled MSI-X interrupt mode.\n"); 4832 break; 4833 default: 4834 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4835 "0482 Illegal interrupt mode.\n"); 4836 break; 4837 } 4838 return; 4839 } 4840 4841 /** 4842 * lpfc_enable_pci_dev - Enable a generic PCI device. 4843 * @phba: pointer to lpfc hba data structure. 4844 * 4845 * This routine is invoked to enable the PCI device that is common to all 4846 * PCI devices.
4847 * 4848 * Return codes 4849 * 0 - successful 4850 * other values - error 4851 **/ 4852 static int 4853 lpfc_enable_pci_dev(struct lpfc_hba *phba) 4854 { 4855 struct pci_dev *pdev; 4856 4857 /* Obtain PCI device reference */ 4858 if (!phba->pcidev) 4859 goto out_error; 4860 else 4861 pdev = phba->pcidev; 4862 /* Enable PCI device */ 4863 if (pci_enable_device_mem(pdev)) 4864 goto out_error; 4865 /* Request PCI resource for the device */ 4866 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME)) 4867 goto out_disable_device; 4868 /* Set up device as PCI master and save state for EEH */ 4869 pci_set_master(pdev); 4870 pci_try_set_mwi(pdev); 4871 pci_save_state(pdev); 4872 4873 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ 4874 if (pci_is_pcie(pdev)) 4875 pdev->needs_freset = 1; 4876 4877 return 0; 4878 4879 out_disable_device: 4880 pci_disable_device(pdev); 4881 out_error: 4882 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4883 "1401 Failed to enable pci device\n"); 4884 return -ENODEV; 4885 } 4886 4887 /** 4888 * lpfc_disable_pci_dev - Disable a generic PCI device. 4889 * @phba: pointer to lpfc hba data structure. 4890 * 4891 * This routine is invoked to disable the PCI device that is common to all 4892 * PCI devices. 4893 **/ 4894 static void 4895 lpfc_disable_pci_dev(struct lpfc_hba *phba) 4896 { 4897 struct pci_dev *pdev; 4898 4899 /* Obtain PCI device reference */ 4900 if (!phba->pcidev) 4901 return; 4902 else 4903 pdev = phba->pcidev; 4904 /* Release PCI resource and disable PCI device */ 4905 pci_release_mem_regions(pdev); 4906 pci_disable_device(pdev); 4907 4908 return; 4909 } 4910 4911 /** 4912 * lpfc_reset_hba - Reset a hba 4913 * @phba: pointer to lpfc hba data structure. 4914 * 4915 * This routine is invoked to reset a hba device. It brings the HBA 4916 * offline, performs a board restart, and then brings the board back 4917 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up 4918 * any outstanding mailbox commands. 4919 **/ 4920 void 4921 lpfc_reset_hba(struct lpfc_hba *phba) 4922 { 4923 /* If resets are disabled then set error state and return. */ 4924 if (!phba->cfg_enable_hba_reset) { 4925 phba->link_state = LPFC_HBA_ERROR; 4926 return; 4927 } 4928 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 4929 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 4930 else 4931 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 4932 lpfc_offline(phba); 4933 lpfc_sli_brdrestart(phba); 4934 lpfc_online(phba); 4935 lpfc_unblock_mgmt_io(phba); 4936 } 4937 4938 /** 4939 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions 4940 * @phba: pointer to lpfc hba data structure. 4941 * 4942 * This function reads the SR-IOV extended capability of the physical 4943 * function to determine the total number of virtual functions the device 4944 * supports. It returns 0 if the device has no SR-IOV capability.
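 *
 * Note: this helper open-codes a read of PCI_SRIOV_TOTAL_VF from the
 * SR-IOV extended capability; the generic PCI core helper
 * pci_sriov_get_totalvfs() reports essentially the same quantity.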
4947 **/ 4948 uint16_t 4949 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba) 4950 { 4951 struct pci_dev *pdev = phba->pcidev; 4952 uint16_t nr_virtfn; 4953 int pos; 4954 4955 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 4956 if (pos == 0) 4957 return 0; 4958 4959 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn); 4960 return nr_virtfn; 4961 } 4962 4963 /** 4964 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions 4965 * @phba: pointer to lpfc hba data structure. 4966 * @nr_vfn: number of virtual functions to be enabled. 4967 * 4968 * This function enables the PCI SR-IOV virtual functions on a physical 4969 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to 4970 * enable that number of virtual functions on the physical function. As 4971 * not all devices support SR-IOV, the return code from the pci_enable_sriov() 4972 * API call is not considered an error condition for most devices. 4973 **/ 4974 int 4975 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) 4976 { 4977 struct pci_dev *pdev = phba->pcidev; 4978 uint16_t max_nr_vfn; 4979 int rc; 4980 4981 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba); 4982 if (nr_vfn > max_nr_vfn) { 4983 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4984 "3057 Requested vfs (%d) greater than " 4985 "supported vfs (%d)", nr_vfn, max_nr_vfn); 4986 return -EINVAL; 4987 } 4988 4989 rc = pci_enable_sriov(pdev, nr_vfn); 4990 if (rc) { 4991 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4992 "2806 Failed to enable sriov on this device " 4993 "with vfn number nr_vf:%d, rc:%d\n", 4994 nr_vfn, rc); 4995 } else 4996 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4997 "2807 Successfully enabled sriov on this device " 4998 "with vfn number nr_vf:%d\n", nr_vfn); 4999 return rc; 5000 } 5001 5002 /** 5003 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev. 5004 * @phba: pointer to lpfc hba data structure. 5005 * 5006 * This routine is invoked to set up the driver internal resources specific to 5007 * support the SLI-3 HBA device it is attached to.
5008 * 5009 * Return codes 5010 * 0 - successful 5011 * other values - error 5012 **/ 5013 static int 5014 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 5015 { 5016 struct lpfc_sli *psli; 5017 int rc; 5018 5019 /* 5020 * Initialize timers used by driver 5021 */ 5022 5023 /* Heartbeat timer */ 5024 init_timer(&phba->hb_tmofunc); 5025 phba->hb_tmofunc.function = lpfc_hb_timeout; 5026 phba->hb_tmofunc.data = (unsigned long)phba; 5027 5028 psli = &phba->sli; 5029 /* MBOX heartbeat timer */ 5030 init_timer(&psli->mbox_tmo); 5031 psli->mbox_tmo.function = lpfc_mbox_timeout; 5032 psli->mbox_tmo.data = (unsigned long) phba; 5033 /* FCP polling mode timer */ 5034 init_timer(&phba->fcp_poll_timer); 5035 phba->fcp_poll_timer.function = lpfc_poll_timeout; 5036 phba->fcp_poll_timer.data = (unsigned long) phba; 5037 /* Fabric block timer */ 5038 init_timer(&phba->fabric_block_timer); 5039 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 5040 phba->fabric_block_timer.data = (unsigned long) phba; 5041 /* EA polling mode timer */ 5042 init_timer(&phba->eratt_poll); 5043 phba->eratt_poll.function = lpfc_poll_eratt; 5044 phba->eratt_poll.data = (unsigned long) phba; 5045 5046 /* Host attention work mask setup */ 5047 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 5048 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 5049 5050 /* Get all the module params for configuring this host */ 5051 lpfc_get_cfgparam(phba); 5052 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { 5053 phba->menlo_flag |= HBA_MENLO_SUPPORT; 5054 /* check for menlo minimum sg count */ 5055 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) 5056 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; 5057 } 5058 5059 if (!phba->sli.ring) 5060 phba->sli.ring = kzalloc(LPFC_SLI3_MAX_RING * 5061 sizeof(struct lpfc_sli_ring), GFP_KERNEL); 5062 if (!phba->sli.ring) 5063 return -ENOMEM; 5064 5065 /* 5066 * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size 5067 * used to create the sg_dma_buf_pool must be dynamically calculated. 5068 */ 5069 5070 /* Initialize the host templates with the configured values. */ 5071 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 5072 lpfc_template_s3.sg_tablesize = phba->cfg_sg_seg_cnt; 5073 5074 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ 5075 if (phba->cfg_enable_bg) { 5076 /* 5077 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, 5078 * the FCP rsp, and a BDE for each. Since we have no control 5079 * over how many protection data segments the SCSI Layer 5080 * will hand us (i.e., there could be one for every block 5081 * in the IO), we just allocate enough BDEs to accommodate 5082 * our max amount and we need to limit lpfc_sg_seg_cnt to 5083 * minimize the risk of running out. 5084 */ 5085 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 5086 sizeof(struct fcp_rsp) + 5087 (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64)); 5088 5089 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) 5090 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; 5091 5092 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */ 5093 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT; 5094 } else { 5095 /* 5096 * The scsi_buf for a regular I/O will hold the FCP cmnd, 5097 * the FCP rsp, a BDE for each, and a BDE for up to 5098 * cfg_sg_seg_cnt data segments.
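 *
 * For example, with cfg_sg_seg_cnt = 64 the pool buffer size computed
 * below works out to sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
 * 66 * sizeof(struct ulp_bde64): 64 data BDEs plus the 2 reserved BDEs.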
5099 */ 5100 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 5101 sizeof(struct fcp_rsp) + 5102 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); 5103 5104 /* Total BDEs in BPL for scsi_sg_list */ 5105 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; 5106 } 5107 5108 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 5109 "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", 5110 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 5111 phba->cfg_total_seg_cnt); 5112 5113 phba->max_vpi = LPFC_MAX_VPI; 5114 /* This will be set to correct value after config_port mbox */ 5115 phba->max_vports = 0; 5116 5117 /* 5118 * Initialize the SLI Layer to run with lpfc HBAs. 5119 */ 5120 lpfc_sli_setup(phba); 5121 lpfc_sli_queue_setup(phba); 5122 5123 /* Allocate device driver memory */ 5124 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) 5125 return -ENOMEM; 5126 5127 /* 5128 * Enable sr-iov virtual functions if supported and configured 5129 * through the module parameter. 5130 */ 5131 if (phba->cfg_sriov_nr_virtfn > 0) { 5132 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 5133 phba->cfg_sriov_nr_virtfn); 5134 if (rc) { 5135 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5136 "2808 Requested number of SR-IOV " 5137 "virtual functions (%d) is not " 5138 "supported\n", 5139 phba->cfg_sriov_nr_virtfn); 5140 phba->cfg_sriov_nr_virtfn = 0; 5141 } 5142 } 5143 5144 return 0; 5145 } 5146 5147 /** 5148 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev 5149 * @phba: pointer to lpfc hba data structure. 5150 * 5151 * This routine is invoked to unset the driver internal resources set up 5152 * specifically for supporting the SLI-3 HBA device it is attached to. 5153 **/ 5154 static void 5155 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 5156 { 5157 /* Free device driver memory allocated */ 5158 lpfc_mem_free_all(phba); 5159 5160 return; 5161 } 5162 5163 /** 5164 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev 5165 * @phba: pointer to lpfc hba data structure. 5166 * 5167 * This routine is invoked to set up the driver internal resources specific to 5168 * support the SLI-4 HBA device it is attached to.
5169 * 5170 * Return codes 5171 * 0 - successful 5172 * other values - error 5173 **/ 5174 static int 5175 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 5176 { 5177 struct lpfc_vector_map_info *cpup; 5178 struct lpfc_sli *psli; 5179 LPFC_MBOXQ_t *mboxq; 5180 int rc, i, hbq_count, max_buf_size; 5181 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 5182 struct lpfc_mqe *mqe; 5183 int longs; 5184 int fof_vectors = 0; 5185 5186 /* Get all the module params for configuring this host */ 5187 lpfc_get_cfgparam(phba); 5188 5189 /* Before proceeding, wait for POST to complete and the device to be ready */ 5190 rc = lpfc_sli4_post_status_check(phba); 5191 if (rc) 5192 return -ENODEV; 5193 5194 /* 5195 * Initialize timers used by driver 5196 */ 5197 5198 /* Heartbeat timer */ 5199 init_timer(&phba->hb_tmofunc); 5200 phba->hb_tmofunc.function = lpfc_hb_timeout; 5201 phba->hb_tmofunc.data = (unsigned long)phba; 5202 init_timer(&phba->rrq_tmr); 5203 phba->rrq_tmr.function = lpfc_rrq_timeout; 5204 phba->rrq_tmr.data = (unsigned long)phba; 5205 5206 psli = &phba->sli; 5207 /* MBOX heartbeat timer */ 5208 init_timer(&psli->mbox_tmo); 5209 psli->mbox_tmo.function = lpfc_mbox_timeout; 5210 psli->mbox_tmo.data = (unsigned long) phba; 5211 /* Fabric block timer */ 5212 init_timer(&phba->fabric_block_timer); 5213 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 5214 phba->fabric_block_timer.data = (unsigned long) phba; 5215 /* EA polling mode timer */ 5216 init_timer(&phba->eratt_poll); 5217 phba->eratt_poll.function = lpfc_poll_eratt; 5218 phba->eratt_poll.data = (unsigned long) phba; 5219 /* FCF rediscover timer */ 5220 init_timer(&phba->fcf.redisc_wait); 5221 phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo; 5222 phba->fcf.redisc_wait.data = (unsigned long)phba; 5223 5224 /* 5225 * Control structure for handling external multi-buffer mailbox 5226 * command pass-through. 5227 */ 5228 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0, 5229 sizeof(struct lpfc_mbox_ext_buf_ctx)); 5230 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); 5231 5232 phba->max_vpi = LPFC_MAX_VPI; 5233 5234 /* This will be set to correct value after the read_config mbox */ 5235 phba->max_vports = 0; 5236 5237 /* Program the default value of vlan_id and fc_map */ 5238 phba->valid_vlan = 0; 5239 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 5240 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 5241 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 5242 5243 /* 5244 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands 5245 * we will associate a new ring for each FCP fastpath EQ/CQ/WQ tuple. 5246 */ 5247 if (!phba->sli.ring) 5248 phba->sli.ring = kzalloc( 5249 (LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) * 5250 sizeof(struct lpfc_sli_ring), GFP_KERNEL); 5251 if (!phba->sli.ring) 5252 return -ENOMEM; 5253 5254 /* 5255 * It doesn't matter what family our adapter is in, we are 5256 * limited to 2 Pages, 512 SGEs, for our SGL. 5257 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp 5258 */ 5259 max_buf_size = (2 * SLI4_PAGE_SIZE); 5260 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2) 5261 phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2; 5262 5263 /* 5264 * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size 5265 * used to create the sg_dma_buf_pool must be dynamically calculated. 5266 */ 5267 5268 if (phba->cfg_enable_bg) { 5269 /* 5270 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, 5271 * the FCP rsp, and a SGE for each.
Since we have no control 5272 * over how many protection data segments the SCSI Layer 5273 * will hand us (i.e., there could be one for every block 5274 * in the IO), we just allocate enough SGEs to accommodate 5275 * our max amount and we need to limit lpfc_sg_seg_cnt to 5276 * minimize the risk of running out. 5277 */ 5278 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 5279 sizeof(struct fcp_rsp) + max_buf_size; 5280 5281 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ 5282 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; 5283 5284 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF) 5285 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF; 5286 } else { 5287 /* 5288 * The scsi_buf for a regular I/O will hold the FCP cmnd, 5289 * the FCP rsp, a SGE for each, and a SGE for up to 5290 * cfg_sg_seg_cnt data segments. 5291 */ 5292 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 5293 sizeof(struct fcp_rsp) + 5294 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)); 5295 5296 /* Total SGEs for scsi_sg_list */ 5297 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; 5298 /* 5299 * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need 5300 * to post 1 page for the SGL. 5301 */ 5302 } 5303 5304 /* Initialize the host templates with the updated values. */ 5305 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 5306 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; 5307 5308 if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) 5309 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; 5310 else 5311 phba->cfg_sg_dma_buf_size = 5312 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); 5313 5314 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 5315 "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n", 5316 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 5317 phba->cfg_total_seg_cnt); 5318 5319 /* Initialize buffer queue management fields */ 5320 hbq_count = lpfc_sli_hbq_count(); 5321 for (i = 0; i < hbq_count; ++i) 5322 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 5323 INIT_LIST_HEAD(&phba->rb_pend_list); 5324 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; 5325 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; 5326 5327 /* 5328 * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 5329 */ 5330 /* Initialize the Abort scsi buffer list used by driver */ 5331 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock); 5332 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list); 5333 /* This abort list is used by the worker thread */ 5334 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock); 5335 5336 /* 5337 * Initialize driver internal slow-path work queues 5338 */ 5339 5340 /* Driver internal slow-path CQ Event pool */ 5341 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); 5342 /* Response IOCB work queue list */ 5343 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); 5344 /* Asynchronous event CQ Event work queue list */ 5345 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); 5346 /* Fast-path XRI aborted CQ Event work queue list */ 5347 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 5348 /* Slow-path XRI aborted CQ Event work queue list */ 5349 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); 5350 /* Receive queue CQ Event work queue list */ 5351 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 5352 5353 /* Initialize extent block lists.
*/ 5354 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list); 5355 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list); 5356 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); 5357 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); 5358 5359 /* initialize optic_state to 0xFF */ 5360 phba->sli4_hba.lnk_info.optic_state = 0xff; 5361 5362 /* Initialize the driver internal SLI layer lists. */ 5363 lpfc_sli_setup(phba); 5364 lpfc_sli_queue_setup(phba); 5365 5366 /* Allocate device driver memory */ 5367 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 5368 if (rc) 5369 return -ENOMEM; 5370 5371 /* IF Type 2 ports get initialized now. */ 5372 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 5373 LPFC_SLI_INTF_IF_TYPE_2) { 5374 rc = lpfc_pci_function_reset(phba); 5375 if (unlikely(rc)) 5376 return -ENODEV; 5377 phba->temp_sensor_support = 1; 5378 } 5379 5380 /* Create the bootstrap mailbox command */ 5381 rc = lpfc_create_bootstrap_mbox(phba); 5382 if (unlikely(rc)) 5383 goto out_free_mem; 5384 5385 /* Set up the host's endian order with the device. */ 5386 rc = lpfc_setup_endian_order(phba); 5387 if (unlikely(rc)) 5388 goto out_free_bsmbx; 5389 5390 /* Set up the hba's configuration parameters. */ 5391 rc = lpfc_sli4_read_config(phba); 5392 if (unlikely(rc)) 5393 goto out_free_bsmbx; 5394 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba); 5395 if (unlikely(rc)) 5396 goto out_free_bsmbx; 5397 5398 /* IF Type 0 ports get initialized now. */ 5399 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 5400 LPFC_SLI_INTF_IF_TYPE_0) { 5401 rc = lpfc_pci_function_reset(phba); 5402 if (unlikely(rc)) 5403 goto out_free_bsmbx; 5404 } 5405 5406 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 5407 GFP_KERNEL); 5408 if (!mboxq) { 5409 rc = -ENOMEM; 5410 goto out_free_bsmbx; 5411 } 5412 5413 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */ 5414 lpfc_supported_pages(mboxq); 5415 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5416 if (!rc) { 5417 mqe = &mboxq->u.mqe; 5418 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), 5419 LPFC_MAX_SUPPORTED_PAGES); 5420 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { 5421 switch (pn_page[i]) { 5422 case LPFC_SLI4_PARAMETERS: 5423 phba->sli4_hba.pc_sli4_params.supported = 1; 5424 break; 5425 default: 5426 break; 5427 } 5428 } 5429 /* Read the port's SLI4 Parameters capabilities if supported. */ 5430 if (phba->sli4_hba.pc_sli4_params.supported) 5431 rc = lpfc_pc_sli4_params_get(phba, mboxq); 5432 if (rc) { 5433 mempool_free(mboxq, phba->mbox_mem_pool); 5434 rc = -EIO; 5435 goto out_free_bsmbx; 5436 } 5437 } 5438 5439 /* 5440 * Get sli4 parameters that override parameters from Port capabilities. 5441 * If this call fails, it isn't critical unless the SLI4 parameters come 5442 * back in conflict. 
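 * For example (an editorial illustration, not from the original comment):
 * a port that reports both extents_in_use and rpi_hdrs_in_use describes a
 * resource model this driver cannot honor, so the error path just below
 * fails the setup rather than continuing with conflicting parameters.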
5443 */ 5444 rc = lpfc_get_sli4_parameters(phba, mboxq); 5445 if (rc) { 5446 if (phba->sli4_hba.extents_in_use && 5447 phba->sli4_hba.rpi_hdrs_in_use) { 5448 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5449 "2999 Unsupported SLI4 Parameters " 5450 "Extents and RPI headers enabled.\n"); 5451 goto out_free_bsmbx; 5452 } 5453 } 5454 mempool_free(mboxq, phba->mbox_mem_pool); 5455 5456 /* Verify OAS is supported */ 5457 lpfc_sli4_oas_verify(phba); 5458 if (phba->cfg_fof) 5459 fof_vectors = 1; 5460 5461 /* Verify all the SLI4 queues */ 5462 rc = lpfc_sli4_queue_verify(phba); 5463 if (rc) 5464 goto out_free_bsmbx; 5465 5466 /* Create driver internal CQE event pool */ 5467 rc = lpfc_sli4_cq_event_pool_create(phba); 5468 if (rc) 5469 goto out_free_bsmbx; 5470 5471 /* Initialize sgl lists per host */ 5472 lpfc_init_sgl_list(phba); 5473 5474 /* Allocate and initialize active sgl array */ 5475 rc = lpfc_init_active_sgl_array(phba); 5476 if (rc) { 5477 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5478 "1430 Failed to initialize sgl list.\n"); 5479 goto out_destroy_cq_event_pool; 5480 } 5481 rc = lpfc_sli4_init_rpi_hdrs(phba); 5482 if (rc) { 5483 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5484 "1432 Failed to initialize rpi headers.\n"); 5485 goto out_free_active_sgl; 5486 } 5487 5488 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ 5489 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 5490 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long), 5491 GFP_KERNEL); 5492 if (!phba->fcf.fcf_rr_bmask) { 5493 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5494 "2759 Failed allocate memory for FCF round " 5495 "robin failover bmask\n"); 5496 rc = -ENOMEM; 5497 goto out_remove_rpi_hdrs; 5498 } 5499 5500 phba->sli4_hba.fcp_eq_hdl = 5501 kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 5502 (fof_vectors + phba->cfg_fcp_io_channel)), 5503 GFP_KERNEL); 5504 if (!phba->sli4_hba.fcp_eq_hdl) { 5505 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5506 "2572 Failed allocate memory for " 5507 "fast-path per-EQ handle array\n"); 5508 rc = -ENOMEM; 5509 goto out_free_fcf_rr_bmask; 5510 } 5511 5512 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * 5513 (fof_vectors + 5514 phba->cfg_fcp_io_channel)), GFP_KERNEL); 5515 if (!phba->sli4_hba.msix_entries) { 5516 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5517 "2573 Failed allocate memory for msi-x " 5518 "interrupt vector entries\n"); 5519 rc = -ENOMEM; 5520 goto out_free_fcp_eq_hdl; 5521 } 5522 5523 phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) * 5524 phba->sli4_hba.num_present_cpu), 5525 GFP_KERNEL); 5526 if (!phba->sli4_hba.cpu_map) { 5527 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5528 "3327 Failed allocate memory for msi-x " 5529 "interrupt vector mapping\n"); 5530 rc = -ENOMEM; 5531 goto out_free_msix; 5532 } 5533 if (lpfc_used_cpu == NULL) { 5534 lpfc_used_cpu = kzalloc((sizeof(uint16_t) * lpfc_present_cpu), 5535 GFP_KERNEL); 5536 if (!lpfc_used_cpu) { 5537 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5538 "3335 Failed allocate memory for msi-x " 5539 "interrupt vector mapping\n"); 5540 kfree(phba->sli4_hba.cpu_map); 5541 rc = -ENOMEM; 5542 goto out_free_msix; 5543 } 5544 for (i = 0; i < lpfc_present_cpu; i++) 5545 lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY; 5546 } 5547 5548 /* Initialize io channels for round robin */ 5549 cpup = phba->sli4_hba.cpu_map; 5550 rc = 0; 5551 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 5552 cpup->channel_id = rc; 5553 rc++; 5554 if (rc >= phba->cfg_fcp_io_channel) 5555 rc 
= 0;
cpup++; /* advance to the next CPU map entry (the loop is otherwise a no-op past entry 0) */
5556 }
5557
5558 /*
5559 * Enable SR-IOV virtual functions if supported and configured
5560 * through the module parameter.
5561 */
5562 if (phba->cfg_sriov_nr_virtfn > 0) {
5563 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
5564 phba->cfg_sriov_nr_virtfn);
5565 if (rc) {
5566 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5567 "3020 Requested number of SR-IOV "
5568 "virtual functions (%d) is not "
5569 "supported\n",
5570 phba->cfg_sriov_nr_virtfn);
5571 phba->cfg_sriov_nr_virtfn = 0;
5572 }
5573 }
5574
5575 return 0;
5576
5577 out_free_msix:
5578 kfree(phba->sli4_hba.msix_entries);
5579 out_free_fcp_eq_hdl:
5580 kfree(phba->sli4_hba.fcp_eq_hdl);
5581 out_free_fcf_rr_bmask:
5582 kfree(phba->fcf.fcf_rr_bmask);
5583 out_remove_rpi_hdrs:
5584 lpfc_sli4_remove_rpi_hdrs(phba);
5585 out_free_active_sgl:
5586 lpfc_free_active_sgl(phba);
5587 out_destroy_cq_event_pool:
5588 lpfc_sli4_cq_event_pool_destroy(phba);
5589 out_free_bsmbx:
5590 lpfc_destroy_bootstrap_mbox(phba);
5591 out_free_mem:
5592 lpfc_mem_free(phba);
5593 return rc;
5594 }
5595
5596 /**
5597 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
5598 * @phba: pointer to lpfc hba data structure.
5599 *
5600 * This routine is invoked to unset the driver internal resources set up
5601 * specific for supporting the SLI-4 HBA device it is attached to.
5602 **/
5603 static void
5604 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
5605 {
5606 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
5607
5608 /* Free memory allocated for msi-x interrupt vector to CPU mapping */
5609 kfree(phba->sli4_hba.cpu_map);
5610 phba->sli4_hba.num_present_cpu = 0;
5611 phba->sli4_hba.num_online_cpu = 0;
5612 phba->sli4_hba.curr_disp_cpu = 0;
5613
5614 /* Free memory allocated for msi-x interrupt vector entries */
5615 kfree(phba->sli4_hba.msix_entries);
5616
5617 /* Free memory allocated for fast-path work queue handles */
5618 kfree(phba->sli4_hba.fcp_eq_hdl);
5619
5620 /* Free the allocated rpi headers. */
5621 lpfc_sli4_remove_rpi_hdrs(phba);
5622 lpfc_sli4_remove_rpis(phba);
5623
5624 /* Free eligible FCF index bmask */
5625 kfree(phba->fcf.fcf_rr_bmask);
5626
5627 /* Free the ELS sgl list */
5628 lpfc_free_active_sgl(phba);
5629 lpfc_free_els_sgl_list(phba);
5630
5631 /* Free the completion queue EQ event pool */
5632 lpfc_sli4_cq_event_release_all(phba);
5633 lpfc_sli4_cq_event_pool_destroy(phba);
5634
5635 /* Release resource identifiers. */
5636 lpfc_sli4_dealloc_resource_identifiers(phba);
5637
5638 /* Free the bsmbx region. */
5639 lpfc_destroy_bootstrap_mbox(phba);
5640
5641 /* Free the SLI Layer memory with SLI4 HBAs */
5642 lpfc_mem_free_all(phba);
5643
5644 /* Free the current connect table */
5645 list_for_each_entry_safe(conn_entry, next_conn_entry,
5646 &phba->fcf_conn_rec_list, list) {
5647 list_del_init(&conn_entry->list);
5648 kfree(conn_entry);
5649 }
5650
5651 return;
5652 }
5653
5654 /**
5655 * lpfc_init_api_table_setup - Set up init api function jump table
5656 * @phba: The hba struct for which this call is being executed.
5657 * @dev_grp: The HBA PCI-Device group number.
5658 *
5659 * This routine sets up the device INIT interface API function jump table
5660 * in @phba struct.
5661 *
5662 * Returns: 0 - success, -ENODEV - failure.
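 *
 * Usage sketch (editorial, not part of the original comment): once the
 * table is populated, callers dispatch through the pointers rather than
 * branching on the SLI revision, e.g.:
 *
 *	lpfc_init_api_table_setup(phba, LPFC_PCI_DEV_OC);
 *	phba->lpfc_stop_port(phba);	// resolves to lpfc_stop_port_s4
 *
 * The names above mirror the assignments made in this routine.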
5663 **/
5664 int
5665 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5666 {
5667 phba->lpfc_hba_init_link = lpfc_hba_init_link;
5668 phba->lpfc_hba_down_link = lpfc_hba_down_link;
5669 phba->lpfc_selective_reset = lpfc_selective_reset;
5670 switch (dev_grp) {
5671 case LPFC_PCI_DEV_LP:
5672 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
5673 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
5674 phba->lpfc_stop_port = lpfc_stop_port_s3;
5675 break;
5676 case LPFC_PCI_DEV_OC:
5677 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
5678 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
5679 phba->lpfc_stop_port = lpfc_stop_port_s4;
5680 break;
5681 default:
5682 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5683 "1431 Invalid HBA PCI-device group: 0x%x\n",
5684 dev_grp);
5685 return -ENODEV;
5686 break;
5687 }
5688 return 0;
5689 }
5690
5691 /**
5692 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
5693 * @phba: pointer to lpfc hba data structure.
5694 *
5695 * This routine is invoked to set up the driver internal resources before the
5696 * device specific resource setup to support the HBA device it is attached to.
5697 *
5698 * Return codes
5699 * 0 - successful
5700 * other values - error
5701 **/
5702 static int
5703 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
5704 {
5705 /*
5706 * Driver resources common to all SLI revisions
5707 */
5708 atomic_set(&phba->fast_event_count, 0);
5709 spin_lock_init(&phba->hbalock);
5710
5711 /* Initialize ndlp management spinlock */
5712 spin_lock_init(&phba->ndlp_lock);
5713
5714 INIT_LIST_HEAD(&phba->port_list);
5715 INIT_LIST_HEAD(&phba->work_list);
5716 init_waitqueue_head(&phba->wait_4_mlo_m_q);
5717
5718 /* Initialize the wait queue head for the kernel thread */
5719 init_waitqueue_head(&phba->work_waitq);
5720
5721 /* Initialize the scsi buffer list used by driver for scsi IO */
5722 spin_lock_init(&phba->scsi_buf_list_get_lock);
5723 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
5724 spin_lock_init(&phba->scsi_buf_list_put_lock);
5725 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
5726
5727 /* Initialize the fabric iocb list */
5728 INIT_LIST_HEAD(&phba->fabric_iocb_list);
5729
5730 /* Initialize list to save ELS buffers */
5731 INIT_LIST_HEAD(&phba->elsbuf);
5732
5733 /* Initialize FCF connection rec list */
5734 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
5735
5736 /* Initialize OAS configuration list */
5737 spin_lock_init(&phba->devicelock);
5738 INIT_LIST_HEAD(&phba->luns);
5739
5740 return 0;
5741 }
5742
5743 /**
5744 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
5745 * @phba: pointer to lpfc hba data structure.
5746 *
5747 * This routine is invoked to set up the driver internal resources after the
5748 * device specific resource setup to support the HBA device it is attached to.
5749 *
5750 * Return codes
5751 * 0 - successful
5752 * other values - error
5753 **/
5754 static int
5755 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
5756 {
5757 int error;
5758
5759 /* Startup the kernel thread for this host adapter. */
5760 phba->worker_thread = kthread_run(lpfc_do_work, phba,
5761 "lpfc_worker_%d", phba->brd_no);
5762 if (IS_ERR(phba->worker_thread)) {
5763 error = PTR_ERR(phba->worker_thread);
5764 return error;
5765 }
5766
5767 return 0;
5768 }
5769
5770 /**
5771 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
5772 * @phba: pointer to lpfc hba data structure.
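 *
 * A brief note (editorial, not from the original source): this undoes
 * lpfc_setup_driver_resource_phase2(); kthread_stop() wakes the worker
 * thread and blocks until lpfc_do_work() returns, so no additional
 * completion handshake is needed here.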
5773 *
5774 * This routine is invoked to unset the driver internal resources set up after
5775 * the device specific resource setup for supporting the HBA device it is
5776 * attached to.
5777 **/
5778 static void
5779 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
5780 {
5781 /* Stop kernel worker thread */
5782 kthread_stop(phba->worker_thread);
5783 }
5784
5785 /**
5786 * lpfc_free_iocb_list - Free iocb list.
5787 * @phba: pointer to lpfc hba data structure.
5788 *
5789 * This routine is invoked to free the driver's IOCB list and memory.
5790 **/
5791 static void
5792 lpfc_free_iocb_list(struct lpfc_hba *phba)
5793 {
5794 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
5795
5796 spin_lock_irq(&phba->hbalock);
5797 list_for_each_entry_safe(iocbq_entry, iocbq_next,
5798 &phba->lpfc_iocb_list, list) {
5799 list_del(&iocbq_entry->list);
5800 kfree(iocbq_entry);
5801 phba->total_iocbq_bufs--;
5802 }
5803 spin_unlock_irq(&phba->hbalock);
5804
5805 return;
5806 }
5807
5808 /**
5809 * lpfc_init_iocb_list - Allocate and initialize iocb list.
5810 * @phba: pointer to lpfc hba data structure.
 * @iocb_count: number of IOCBs to allocate and add to the list.
5811 *
5812 * This routine is invoked to allocate and initialize the driver's IOCB
5813 * list and set up the IOCB tag array accordingly.
5814 *
5815 * Return codes
5816 * 0 - successful
5817 * other values - error
5818 **/
5819 static int
5820 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
5821 {
5822 struct lpfc_iocbq *iocbq_entry = NULL;
5823 uint16_t iotag;
5824 int i;
5825
5826 /* Initialize and populate the iocb list per host. */
5827 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
5828 for (i = 0; i < iocb_count; i++) {
5829 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
5830 if (iocbq_entry == NULL) {
5831 printk(KERN_ERR "%s: only allocated %d iocbs of "
5832 "expected %d count. Unloading driver.\n",
5833 __func__, i, iocb_count);
5834 goto out_free_iocbq;
5835 }
5836
5837 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
5838 if (iotag == 0) {
5839 kfree(iocbq_entry);
5840 printk(KERN_ERR "%s: failed to allocate IOTAG. "
5841 "Unloading driver.\n", __func__);
5842 goto out_free_iocbq;
5843 }
5844 iocbq_entry->sli4_lxritag = NO_XRI;
5845 iocbq_entry->sli4_xritag = NO_XRI;
5846
5847 spin_lock_irq(&phba->hbalock);
5848 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
5849 phba->total_iocbq_bufs++;
5850 spin_unlock_irq(&phba->hbalock);
5851 }
5852
5853 return 0;
5854
5855 out_free_iocbq:
5856 lpfc_free_iocb_list(phba);
5857
5858 return -ENOMEM;
5859 }
5860
5861 /**
5862 * lpfc_free_sgl_list - Free a given sgl list.
5863 * @phba: pointer to lpfc hba data structure.
5864 * @sglq_list: pointer to the head of sgl list.
5865 *
5866 * This routine is invoked to free a given sgl list and memory.
5867 **/
5868 void
5869 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
5870 {
5871 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
5872
5873 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
5874 list_del(&sglq_entry->list);
5875 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
5876 kfree(sglq_entry);
5877 }
5878 }
5879
5880 /**
5881 * lpfc_free_els_sgl_list - Free els sgl list.
5882 * @phba: pointer to lpfc hba data structure.
5883 *
5884 * This routine is invoked to free the driver's els sgl list and memory.
5885 **/
5886 static void
5887 lpfc_free_els_sgl_list(struct lpfc_hba *phba)
5888 {
5889 LIST_HEAD(sglq_list);
5890 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
5891
5892 /* Retrieve all els sgls from driver list */
5893 spin_lock_irq(&phba->hbalock);
5894 spin_lock(&pring->ring_lock);
5895 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
5896 spin_unlock(&pring->ring_lock);
5897 spin_unlock_irq(&phba->hbalock);
5898
5899 /* Now free the sgl list */
5900 lpfc_free_sgl_list(phba, &sglq_list);
5901 }
5902
5903 /**
5904 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
5905 * @phba: pointer to lpfc hba data structure.
5906 *
5907 * This routine is invoked to allocate the driver's active sgl memory.
5908 * This array will hold the sglq_entry's for active IOs.
5909 **/
5910 static int
5911 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
5912 {
5913 int size;
5914 size = sizeof(struct lpfc_sglq *);
5915 size *= phba->sli4_hba.max_cfg_param.max_xri;
5916
5917 phba->sli4_hba.lpfc_sglq_active_list =
5918 kzalloc(size, GFP_KERNEL);
5919 if (!phba->sli4_hba.lpfc_sglq_active_list)
5920 return -ENOMEM;
5921 return 0;
5922 }
5923
5924 /**
5925 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
5926 * @phba: pointer to lpfc hba data structure.
5927 *
5928 * This routine is invoked to walk through the array of active sglq entries
5929 * and free all of the resources.
5930 * This is just a placeholder for now.
5931 **/
5932 static void
5933 lpfc_free_active_sgl(struct lpfc_hba *phba)
5934 {
5935 kfree(phba->sli4_hba.lpfc_sglq_active_list);
5936 }
5937
5938 /**
5939 * lpfc_init_sgl_list - Allocate and initialize sgl list.
5940 * @phba: pointer to lpfc hba data structure.
5941 *
5942 * This routine is invoked to allocate and initialize the driver's sgl
5943 * list and set up the sgl xritag tag array accordingly.
5944 *
5945 **/
5946 static void
5947 lpfc_init_sgl_list(struct lpfc_hba *phba)
5948 {
5949 /* Initialize and populate the sglq list per host/VF. */
5950 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
5951 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
5952
5953 /* els xri-sgl book keeping */
5954 phba->sli4_hba.els_xri_cnt = 0;
5955
5956 /* scsi xri-buffer book keeping */
5957 phba->sli4_hba.scsi_xri_cnt = 0;
5958 }
5959
5960 /**
5961 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
5962 * @phba: pointer to lpfc hba data structure.
5963 *
5964 * This routine is invoked to post rpi header templates to the
5965 * port for those SLI4 ports that do not support extents. This routine
5966 * posts a PAGE_SIZE memory region to the port to hold up to
5967 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine
5968 * and should be called only when interrupts are disabled.
5969 *
5970 * Return codes
5971 * 0 - successful
5972 * -ERROR - otherwise.
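 *
 * Sizing sketch (editorial, assuming the 64-rpi-per-region granularity
 * described below in lpfc_sli4_create_rpi_hdr()): one 4KB header region
 * covers 4096 / 64 = 64 rpi contexts, which is why each posting advances
 * next_rpi in increments of up to LPFC_RPI_HDR_COUNT rpis.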
5973 **/ 5974 int 5975 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 5976 { 5977 int rc = 0; 5978 struct lpfc_rpi_hdr *rpi_hdr; 5979 5980 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 5981 if (!phba->sli4_hba.rpi_hdrs_in_use) 5982 return rc; 5983 if (phba->sli4_hba.extents_in_use) 5984 return -EIO; 5985 5986 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 5987 if (!rpi_hdr) { 5988 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5989 "0391 Error during rpi post operation\n"); 5990 lpfc_sli4_remove_rpis(phba); 5991 rc = -ENODEV; 5992 } 5993 5994 return rc; 5995 } 5996 5997 /** 5998 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region 5999 * @phba: pointer to lpfc hba data structure. 6000 * 6001 * This routine is invoked to allocate a single 4KB memory region to 6002 * support rpis and stores them in the phba. This single region 6003 * provides support for up to 64 rpis. The region is used globally 6004 * by the device. 6005 * 6006 * Returns: 6007 * A valid rpi hdr on success. 6008 * A NULL pointer on any failure. 6009 **/ 6010 struct lpfc_rpi_hdr * 6011 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 6012 { 6013 uint16_t rpi_limit, curr_rpi_range; 6014 struct lpfc_dmabuf *dmabuf; 6015 struct lpfc_rpi_hdr *rpi_hdr; 6016 uint32_t rpi_count; 6017 6018 /* 6019 * If the SLI4 port supports extents, posting the rpi header isn't 6020 * required. Set the expected maximum count and let the actual value 6021 * get set when extents are fully allocated. 6022 */ 6023 if (!phba->sli4_hba.rpi_hdrs_in_use) 6024 return NULL; 6025 if (phba->sli4_hba.extents_in_use) 6026 return NULL; 6027 6028 /* The limit on the logical index is just the max_rpi count. */ 6029 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + 6030 phba->sli4_hba.max_cfg_param.max_rpi - 1; 6031 6032 spin_lock_irq(&phba->hbalock); 6033 /* 6034 * Establish the starting RPI in this header block. The starting 6035 * rpi is normalized to a zero base because the physical rpi is 6036 * port based. 6037 */ 6038 curr_rpi_range = phba->sli4_hba.next_rpi; 6039 spin_unlock_irq(&phba->hbalock); 6040 6041 /* 6042 * The port has a limited number of rpis. The increment here 6043 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value 6044 * and to allow the full max_rpi range per port. 6045 */ 6046 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit) 6047 rpi_count = rpi_limit - curr_rpi_range; 6048 else 6049 rpi_count = LPFC_RPI_HDR_COUNT; 6050 6051 if (!rpi_count) 6052 return NULL; 6053 /* 6054 * First allocate the protocol header region for the port. The 6055 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 6056 */ 6057 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 6058 if (!dmabuf) 6059 return NULL; 6060 6061 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, 6062 LPFC_HDR_TEMPLATE_SIZE, 6063 &dmabuf->phys, GFP_KERNEL); 6064 if (!dmabuf->virt) { 6065 rpi_hdr = NULL; 6066 goto err_free_dmabuf; 6067 } 6068 6069 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 6070 rpi_hdr = NULL; 6071 goto err_free_coherent; 6072 } 6073 6074 /* Save the rpi header data for cleanup later. */ 6075 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 6076 if (!rpi_hdr) 6077 goto err_free_coherent; 6078 6079 rpi_hdr->dmabuf = dmabuf; 6080 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 6081 rpi_hdr->page_count = 1; 6082 spin_lock_irq(&phba->hbalock); 6083 6084 /* The rpi_hdr stores the logical index only. 
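 * (Illustrative, with hypothetical values: if next_rpi is 0 and the port's
 * rpi_base is, say, 64, start_rpi below records the logical value 0 while
 * the port itself operates on physical rpi 64; the base offset is applied
 * on the port side, as the comment above explains.)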
*/ 6085 rpi_hdr->start_rpi = curr_rpi_range; 6086 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 6087 6088 /* 6089 * The next_rpi stores the next logical module-64 rpi value used 6090 * to post physical rpis in subsequent rpi postings. 6091 */ 6092 phba->sli4_hba.next_rpi += rpi_count; 6093 spin_unlock_irq(&phba->hbalock); 6094 return rpi_hdr; 6095 6096 err_free_coherent: 6097 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 6098 dmabuf->virt, dmabuf->phys); 6099 err_free_dmabuf: 6100 kfree(dmabuf); 6101 return NULL; 6102 } 6103 6104 /** 6105 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 6106 * @phba: pointer to lpfc hba data structure. 6107 * 6108 * This routine is invoked to remove all memory resources allocated 6109 * to support rpis for SLI4 ports not supporting extents. This routine 6110 * presumes the caller has released all rpis consumed by fabric or port 6111 * logins and is prepared to have the header pages removed. 6112 **/ 6113 void 6114 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 6115 { 6116 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 6117 6118 if (!phba->sli4_hba.rpi_hdrs_in_use) 6119 goto exit; 6120 6121 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 6122 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 6123 list_del(&rpi_hdr->list); 6124 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 6125 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 6126 kfree(rpi_hdr->dmabuf); 6127 kfree(rpi_hdr); 6128 } 6129 exit: 6130 /* There are no rpis available to the port now. */ 6131 phba->sli4_hba.next_rpi = 0; 6132 } 6133 6134 /** 6135 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 6136 * @pdev: pointer to pci device data structure. 6137 * 6138 * This routine is invoked to allocate the driver hba data structure for an 6139 * HBA device. If the allocation is successful, the phba reference to the 6140 * PCI device data structure is set. 6141 * 6142 * Return codes 6143 * pointer to @phba - successful 6144 * NULL - error 6145 **/ 6146 static struct lpfc_hba * 6147 lpfc_hba_alloc(struct pci_dev *pdev) 6148 { 6149 struct lpfc_hba *phba; 6150 6151 /* Allocate memory for HBA structure */ 6152 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 6153 if (!phba) { 6154 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 6155 return NULL; 6156 } 6157 6158 /* Set reference to PCI device in HBA structure */ 6159 phba->pcidev = pdev; 6160 6161 /* Assign an unused board number */ 6162 phba->brd_no = lpfc_get_instance(); 6163 if (phba->brd_no < 0) { 6164 kfree(phba); 6165 return NULL; 6166 } 6167 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL; 6168 6169 spin_lock_init(&phba->ct_ev_lock); 6170 INIT_LIST_HEAD(&phba->ct_ev_waiters); 6171 6172 return phba; 6173 } 6174 6175 /** 6176 * lpfc_hba_free - Free driver hba data structure with a device. 6177 * @phba: pointer to lpfc hba data structure. 6178 * 6179 * This routine is invoked to free the driver hba data structure with an 6180 * HBA device. 6181 **/ 6182 static void 6183 lpfc_hba_free(struct lpfc_hba *phba) 6184 { 6185 /* Release the driver assigned board number */ 6186 idr_remove(&lpfc_hba_index, phba->brd_no); 6187 6188 /* Free memory allocated with sli rings */ 6189 kfree(phba->sli.ring); 6190 phba->sli.ring = NULL; 6191 6192 kfree(phba); 6193 return; 6194 } 6195 6196 /** 6197 * lpfc_create_shost - Create hba physical port with associated scsi host. 6198 * @phba: pointer to lpfc hba data structure. 
6199 * 6200 * This routine is invoked to create HBA physical port and associate a SCSI 6201 * host with it. 6202 * 6203 * Return codes 6204 * 0 - successful 6205 * other values - error 6206 **/ 6207 static int 6208 lpfc_create_shost(struct lpfc_hba *phba) 6209 { 6210 struct lpfc_vport *vport; 6211 struct Scsi_Host *shost; 6212 6213 /* Initialize HBA FC structure */ 6214 phba->fc_edtov = FF_DEF_EDTOV; 6215 phba->fc_ratov = FF_DEF_RATOV; 6216 phba->fc_altov = FF_DEF_ALTOV; 6217 phba->fc_arbtov = FF_DEF_ARBTOV; 6218 6219 atomic_set(&phba->sdev_cnt, 0); 6220 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 6221 if (!vport) 6222 return -ENODEV; 6223 6224 shost = lpfc_shost_from_vport(vport); 6225 phba->pport = vport; 6226 lpfc_debugfs_initialize(vport); 6227 /* Put reference to SCSI host to driver's device private data */ 6228 pci_set_drvdata(phba->pcidev, shost); 6229 6230 /* 6231 * At this point we are fully registered with PSA. In addition, 6232 * any initial discovery should be completed. 6233 */ 6234 vport->load_flag |= FC_ALLOW_FDMI; 6235 if (phba->cfg_enable_SmartSAN || 6236 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { 6237 6238 /* Setup appropriate attribute masks */ 6239 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; 6240 if (phba->cfg_enable_SmartSAN) 6241 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; 6242 else 6243 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; 6244 } 6245 return 0; 6246 } 6247 6248 /** 6249 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 6250 * @phba: pointer to lpfc hba data structure. 6251 * 6252 * This routine is invoked to destroy HBA physical port and the associated 6253 * SCSI host. 6254 **/ 6255 static void 6256 lpfc_destroy_shost(struct lpfc_hba *phba) 6257 { 6258 struct lpfc_vport *vport = phba->pport; 6259 6260 /* Destroy physical port that associated with the SCSI host */ 6261 destroy_port(vport); 6262 6263 return; 6264 } 6265 6266 /** 6267 * lpfc_setup_bg - Setup Block guard structures and debug areas. 6268 * @phba: pointer to lpfc hba data structure. 6269 * @shost: the shost to be used to detect Block guard settings. 6270 * 6271 * This routine sets up the local Block guard protocol settings for @shost. 6272 * This routine also allocates memory for debugging bg buffers. 
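 *
 * Behavior sketch (an editorial summary of the code below, with
 * hypothetical module settings): user-supplied masks are clipped to the
 * supported bits, e.g. given
 *
 *	phba->cfg_prot_mask  = SHOST_DIX_TYPE1_PROTECTION;
 *	phba->cfg_prot_guard = SHOST_DIX_GUARD_CRC;
 *
 * the mask is promoted to SHOST_DIF_TYPE1_PROTECTION |
 * SHOST_DIX_TYPE1_PROTECTION before being registered with the SCSI layer
 * via scsi_host_set_prot() and scsi_host_set_guard().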
6273 **/ 6274 static void 6275 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 6276 { 6277 uint32_t old_mask; 6278 uint32_t old_guard; 6279 6280 int pagecnt = 10; 6281 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 6282 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6283 "1478 Registering BlockGuard with the " 6284 "SCSI layer\n"); 6285 6286 old_mask = phba->cfg_prot_mask; 6287 old_guard = phba->cfg_prot_guard; 6288 6289 /* Only allow supported values */ 6290 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | 6291 SHOST_DIX_TYPE0_PROTECTION | 6292 SHOST_DIX_TYPE1_PROTECTION); 6293 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP | 6294 SHOST_DIX_GUARD_CRC); 6295 6296 /* DIF Type 1 protection for profiles AST1/C1 is end to end */ 6297 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION) 6298 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; 6299 6300 if (phba->cfg_prot_mask && phba->cfg_prot_guard) { 6301 if ((old_mask != phba->cfg_prot_mask) || 6302 (old_guard != phba->cfg_prot_guard)) 6303 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6304 "1475 Registering BlockGuard with the " 6305 "SCSI layer: mask %d guard %d\n", 6306 phba->cfg_prot_mask, 6307 phba->cfg_prot_guard); 6308 6309 scsi_host_set_prot(shost, phba->cfg_prot_mask); 6310 scsi_host_set_guard(shost, phba->cfg_prot_guard); 6311 } else 6312 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6313 "1479 Not Registering BlockGuard with the SCSI " 6314 "layer, Bad protection parameters: %d %d\n", 6315 old_mask, old_guard); 6316 } 6317 6318 if (!_dump_buf_data) { 6319 while (pagecnt) { 6320 spin_lock_init(&_dump_buf_lock); 6321 _dump_buf_data = 6322 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 6323 if (_dump_buf_data) { 6324 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6325 "9043 BLKGRD: allocated %d pages for " 6326 "_dump_buf_data at 0x%p\n", 6327 (1 << pagecnt), _dump_buf_data); 6328 _dump_buf_data_order = pagecnt; 6329 memset(_dump_buf_data, 0, 6330 ((1 << PAGE_SHIFT) << pagecnt)); 6331 break; 6332 } else 6333 --pagecnt; 6334 } 6335 if (!_dump_buf_data_order) 6336 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6337 "9044 BLKGRD: ERROR unable to allocate " 6338 "memory for hexdump\n"); 6339 } else 6340 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6341 "9045 BLKGRD: already allocated _dump_buf_data=0x%p" 6342 "\n", _dump_buf_data); 6343 if (!_dump_buf_dif) { 6344 while (pagecnt) { 6345 _dump_buf_dif = 6346 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 6347 if (_dump_buf_dif) { 6348 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6349 "9046 BLKGRD: allocated %d pages for " 6350 "_dump_buf_dif at 0x%p\n", 6351 (1 << pagecnt), _dump_buf_dif); 6352 _dump_buf_dif_order = pagecnt; 6353 memset(_dump_buf_dif, 0, 6354 ((1 << PAGE_SHIFT) << pagecnt)); 6355 break; 6356 } else 6357 --pagecnt; 6358 } 6359 if (!_dump_buf_dif_order) 6360 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6361 "9047 BLKGRD: ERROR unable to allocate " 6362 "memory for hexdump\n"); 6363 } else 6364 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6365 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n", 6366 _dump_buf_dif); 6367 } 6368 6369 /** 6370 * lpfc_post_init_setup - Perform necessary device post initialization setup. 6371 * @phba: pointer to lpfc hba data structure. 6372 * 6373 * This routine is invoked to perform all the necessary post initialization 6374 * setup for the device. 
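 *
 * One concrete effect (editorial note, example value hypothetical): the
 * SCSI host's can_queue is re-derived from the possibly updated queue
 * depth, e.g. a cfg_hba_queue_depth of 512 yields shost->can_queue =
 * 512 - 10 = 502, leaving a little headroom for driver-internal commands.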
6375 **/ 6376 static void 6377 lpfc_post_init_setup(struct lpfc_hba *phba) 6378 { 6379 struct Scsi_Host *shost; 6380 struct lpfc_adapter_event_header adapter_event; 6381 6382 /* Get the default values for Model Name and Description */ 6383 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 6384 6385 /* 6386 * hba setup may have changed the hba_queue_depth so we need to 6387 * adjust the value of can_queue. 6388 */ 6389 shost = pci_get_drvdata(phba->pcidev); 6390 shost->can_queue = phba->cfg_hba_queue_depth - 10; 6391 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 6392 lpfc_setup_bg(phba, shost); 6393 6394 lpfc_host_attrib_init(shost); 6395 6396 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 6397 spin_lock_irq(shost->host_lock); 6398 lpfc_poll_start_timer(phba); 6399 spin_unlock_irq(shost->host_lock); 6400 } 6401 6402 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6403 "0428 Perform SCSI scan\n"); 6404 /* Send board arrival event to upper layer */ 6405 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 6406 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 6407 fc_host_post_vendor_event(shost, fc_get_event_number(), 6408 sizeof(adapter_event), 6409 (char *) &adapter_event, 6410 LPFC_NL_VENDOR_ID); 6411 return; 6412 } 6413 6414 /** 6415 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 6416 * @phba: pointer to lpfc hba data structure. 6417 * 6418 * This routine is invoked to set up the PCI device memory space for device 6419 * with SLI-3 interface spec. 6420 * 6421 * Return codes 6422 * 0 - successful 6423 * other values - error 6424 **/ 6425 static int 6426 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 6427 { 6428 struct pci_dev *pdev; 6429 unsigned long bar0map_len, bar2map_len; 6430 int i, hbq_count; 6431 void *ptr; 6432 int error = -ENODEV; 6433 6434 /* Obtain PCI device reference */ 6435 if (!phba->pcidev) 6436 return error; 6437 else 6438 pdev = phba->pcidev; 6439 6440 /* Set the device DMA mask size */ 6441 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 6442 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 6443 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 6444 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 6445 return error; 6446 } 6447 } 6448 6449 /* Get the bus address of Bar0 and Bar2 and the number of bytes 6450 * required by each mapping. 6451 */ 6452 phba->pci_bar0_map = pci_resource_start(pdev, 0); 6453 bar0map_len = pci_resource_len(pdev, 0); 6454 6455 phba->pci_bar2_map = pci_resource_start(pdev, 2); 6456 bar2map_len = pci_resource_len(pdev, 2); 6457 6458 /* Map HBA SLIM to a kernel virtual address. */ 6459 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 6460 if (!phba->slim_memmap_p) { 6461 dev_printk(KERN_ERR, &pdev->dev, 6462 "ioremap failed for SLIM memory.\n"); 6463 goto out; 6464 } 6465 6466 /* Map HBA Control Registers to a kernel virtual address. 
*/ 6467 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 6468 if (!phba->ctrl_regs_memmap_p) { 6469 dev_printk(KERN_ERR, &pdev->dev, 6470 "ioremap failed for HBA control registers.\n"); 6471 goto out_iounmap_slim; 6472 } 6473 6474 /* Allocate memory for SLI-2 structures */ 6475 phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, 6476 &phba->slim2p.phys, GFP_KERNEL); 6477 if (!phba->slim2p.virt) 6478 goto out_iounmap; 6479 6480 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 6481 phba->mbox_ext = (phba->slim2p.virt + 6482 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 6483 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 6484 phba->IOCBs = (phba->slim2p.virt + 6485 offsetof(struct lpfc_sli2_slim, IOCBs)); 6486 6487 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 6488 lpfc_sli_hbq_size(), 6489 &phba->hbqslimp.phys, 6490 GFP_KERNEL); 6491 if (!phba->hbqslimp.virt) 6492 goto out_free_slim; 6493 6494 hbq_count = lpfc_sli_hbq_count(); 6495 ptr = phba->hbqslimp.virt; 6496 for (i = 0; i < hbq_count; ++i) { 6497 phba->hbqs[i].hbq_virt = ptr; 6498 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 6499 ptr += (lpfc_hbq_defs[i]->entry_count * 6500 sizeof(struct lpfc_hbq_entry)); 6501 } 6502 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 6503 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 6504 6505 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 6506 6507 INIT_LIST_HEAD(&phba->rb_pend_list); 6508 6509 phba->MBslimaddr = phba->slim_memmap_p; 6510 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 6511 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 6512 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 6513 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 6514 6515 return 0; 6516 6517 out_free_slim: 6518 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 6519 phba->slim2p.virt, phba->slim2p.phys); 6520 out_iounmap: 6521 iounmap(phba->ctrl_regs_memmap_p); 6522 out_iounmap_slim: 6523 iounmap(phba->slim_memmap_p); 6524 out: 6525 return error; 6526 } 6527 6528 /** 6529 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 6530 * @phba: pointer to lpfc hba data structure. 6531 * 6532 * This routine is invoked to unset the PCI device memory space for device 6533 * with SLI-3 interface spec. 6534 **/ 6535 static void 6536 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) 6537 { 6538 struct pci_dev *pdev; 6539 6540 /* Obtain PCI device reference */ 6541 if (!phba->pcidev) 6542 return; 6543 else 6544 pdev = phba->pcidev; 6545 6546 /* Free coherent DMA memory allocated */ 6547 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 6548 phba->hbqslimp.virt, phba->hbqslimp.phys); 6549 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 6550 phba->slim2p.virt, phba->slim2p.phys); 6551 6552 /* I/O memory unmap */ 6553 iounmap(phba->ctrl_regs_memmap_p); 6554 iounmap(phba->slim_memmap_p); 6555 6556 return; 6557 } 6558 6559 /** 6560 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status 6561 * @phba: pointer to lpfc hba data structure. 6562 * 6563 * This routine is invoked to wait for SLI4 device Power On Self Test (POST) 6564 * done and check status. 6565 * 6566 * Return 0 if successful, otherwise -ENODEV. 
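 *
 * Timing sketch (editorial): the wait loop below polls the port semaphore
 * register up to 3000 times with msleep(10) between reads, which is where
 * the roughly 30 second POST budget mentioned in the code comes from.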
6567 **/
6568 int
6569 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
6570 {
6571 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
6572 struct lpfc_register reg_data;
6573 int i, port_error = 0;
6574 uint32_t if_type;
6575
6576 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
6577 memset(&reg_data, 0, sizeof(reg_data));
6578 if (!phba->sli4_hba.PSMPHRregaddr)
6579 return -ENODEV;
6580
6581 /* Wait up to 30 seconds for the SLI Port POST done and ready */
6582 for (i = 0; i < 3000; i++) {
6583 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
6584 &portsmphr_reg.word0) ||
6585 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
6586 /* Port has a fatal POST error, break out */
6587 port_error = -ENODEV;
6588 break;
6589 }
6590 if (LPFC_POST_STAGE_PORT_READY ==
6591 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
6592 break;
6593 msleep(10);
6594 }
6595
6596 /*
6597 * If there was a port error during POST, then don't proceed with
6598 * other register reads as the data may not be valid. Just exit.
6599 */
6600 if (port_error) {
6601 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6602 "1408 Port Failed POST - portsmphr=0x%x, "
6603 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
6604 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
6605 portsmphr_reg.word0,
6606 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
6607 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
6608 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
6609 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
6610 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
6611 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
6612 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
6613 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
6614 } else {
6615 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6616 "2534 Device Info: SLIFamily=0x%x, "
6617 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
6618 "SLIHint_2=0x%x, FT=0x%x\n",
6619 bf_get(lpfc_sli_intf_sli_family,
6620 &phba->sli4_hba.sli_intf),
6621 bf_get(lpfc_sli_intf_slirev,
6622 &phba->sli4_hba.sli_intf),
6623 bf_get(lpfc_sli_intf_if_type,
6624 &phba->sli4_hba.sli_intf),
6625 bf_get(lpfc_sli_intf_sli_hint1,
6626 &phba->sli4_hba.sli_intf),
6627 bf_get(lpfc_sli_intf_sli_hint2,
6628 &phba->sli4_hba.sli_intf),
6629 bf_get(lpfc_sli_intf_func_type,
6630 &phba->sli4_hba.sli_intf));
6631 /*
6632 * Check for other Port errors during the initialization
6633 * process. Fail the load if the port did not come up
6634 * correctly.
6635 */
6636 if_type = bf_get(lpfc_sli_intf_if_type,
6637 &phba->sli4_hba.sli_intf);
6638 switch (if_type) {
6639 case LPFC_SLI_INTF_IF_TYPE_0:
6640 phba->sli4_hba.ue_mask_lo =
6641 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
6642 phba->sli4_hba.ue_mask_hi =
6643 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
6644 uerrlo_reg.word0 =
6645 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
6646 uerrhi_reg.word0 =
6647 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
6648 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
6649 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
6650 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6651 "1422 Unrecoverable Error "
6652 "Detected during POST "
6653 "uerr_lo_reg=0x%x, "
6654 "uerr_hi_reg=0x%x, "
6655 "ue_mask_lo_reg=0x%x, "
6656 "ue_mask_hi_reg=0x%x\n",
6657 uerrlo_reg.word0,
6658 uerrhi_reg.word0,
6659 phba->sli4_hba.ue_mask_lo,
6660 phba->sli4_hba.ue_mask_hi);
6661 port_error = -ENODEV;
6662 }
6663 break;
6664 case LPFC_SLI_INTF_IF_TYPE_2:
6665 /* Final checks. The port status should be clean.
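 * (Editorial note: "clean" here means the sliport_status error bit may
 * only appear together with the restart-needed (rn) bit; an error
 * reported without rn set is treated as unrecoverable by the check
 * below.)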
*/
6666 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
6667 &reg_data.word0) ||
6668 (bf_get(lpfc_sliport_status_err, &reg_data) &&
6669 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
6670 phba->work_status[0] =
6671 readl(phba->sli4_hba.u.if_type2.
6672 ERR1regaddr);
6673 phba->work_status[1] =
6674 readl(phba->sli4_hba.u.if_type2.
6675 ERR2regaddr);
6676 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6677 "2888 Unrecoverable port error "
6678 "following POST: port status reg "
6679 "0x%x, port_smphr reg 0x%x, "
6680 "error 1=0x%x, error 2=0x%x\n",
6681 reg_data.word0,
6682 portsmphr_reg.word0,
6683 phba->work_status[0],
6684 phba->work_status[1]);
6685 port_error = -ENODEV;
6686 }
6687 break;
6688 case LPFC_SLI_INTF_IF_TYPE_1:
6689 default:
6690 break;
6691 }
6692 }
6693 return port_error;
6694 }
6695
6696 /**
6697 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
6698 * @phba: pointer to lpfc hba data structure.
6699 * @if_type: The SLI4 interface type getting configured.
6700 *
6701 * This routine is invoked to set up SLI4 BAR0 PCI config space register
6702 * memory map.
6703 **/
6704 static void
6705 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
6706 {
6707 switch (if_type) {
6708 case LPFC_SLI_INTF_IF_TYPE_0:
6709 phba->sli4_hba.u.if_type0.UERRLOregaddr =
6710 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
6711 phba->sli4_hba.u.if_type0.UERRHIregaddr =
6712 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
6713 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
6714 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
6715 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
6716 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
6717 phba->sli4_hba.SLIINTFregaddr =
6718 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
6719 break;
6720 case LPFC_SLI_INTF_IF_TYPE_2:
6721 phba->sli4_hba.u.if_type2.ERR1regaddr =
6722 phba->sli4_hba.conf_regs_memmap_p +
6723 LPFC_CTL_PORT_ER1_OFFSET;
6724 phba->sli4_hba.u.if_type2.ERR2regaddr =
6725 phba->sli4_hba.conf_regs_memmap_p +
6726 LPFC_CTL_PORT_ER2_OFFSET;
6727 phba->sli4_hba.u.if_type2.CTRLregaddr =
6728 phba->sli4_hba.conf_regs_memmap_p +
6729 LPFC_CTL_PORT_CTL_OFFSET;
6730 phba->sli4_hba.u.if_type2.STATUSregaddr =
6731 phba->sli4_hba.conf_regs_memmap_p +
6732 LPFC_CTL_PORT_STA_OFFSET;
6733 phba->sli4_hba.SLIINTFregaddr =
6734 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
6735 phba->sli4_hba.PSMPHRregaddr =
6736 phba->sli4_hba.conf_regs_memmap_p +
6737 LPFC_CTL_PORT_SEM_OFFSET;
6738 phba->sli4_hba.RQDBregaddr =
6739 phba->sli4_hba.conf_regs_memmap_p +
6740 LPFC_ULP0_RQ_DOORBELL;
6741 phba->sli4_hba.WQDBregaddr =
6742 phba->sli4_hba.conf_regs_memmap_p +
6743 LPFC_ULP0_WQ_DOORBELL;
6744 phba->sli4_hba.EQCQDBregaddr =
6745 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
6746 phba->sli4_hba.MQDBregaddr =
6747 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
6748 phba->sli4_hba.BMBXregaddr =
6749 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
6750 break;
6751 case LPFC_SLI_INTF_IF_TYPE_1:
6752 default:
6753 dev_printk(KERN_ERR, &phba->pcidev->dev,
6754 "FATAL - unsupported SLI4 interface type - %d\n",
6755 if_type);
6756 break;
6757 }
6758 }
6759
6760 /**
6761 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
6762 * @phba: pointer to lpfc hba data structure.
6763 *
6764 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
6765 * memory map.
6766 **/
6767 static void
6768 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
6769 {
6770 phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6771 LPFC_SLIPORT_IF0_SMPHR;
6772 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6773 LPFC_HST_ISR0;
6774 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6775 LPFC_HST_IMR0;
6776 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6777 LPFC_HST_ISCR0;
6778 }
6779
6780 /**
6781 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
6782 * @phba: pointer to lpfc hba data structure.
6783 * @vf: virtual function number
6784 *
6785 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
6786 * based on the given virtual function number, @vf.
6787 *
6788 * Return 0 if successful, otherwise -ENODEV.
6789 **/
6790 static int
6791 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
6792 {
6793 if (vf > LPFC_VIR_FUNC_MAX)
6794 return -ENODEV;
6795
6796 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6797 vf * LPFC_VFR_PAGE_SIZE +
6798 LPFC_ULP0_RQ_DOORBELL);
6799 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6800 vf * LPFC_VFR_PAGE_SIZE +
6801 LPFC_ULP0_WQ_DOORBELL);
6802 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6803 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
6804 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6805 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
6806 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6807 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
6808 return 0;
6809 }
6810
6811 /**
6812 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
6813 * @phba: pointer to lpfc hba data structure.
6814 *
6815 * This routine is invoked to create the bootstrap mailbox
6816 * region consistent with the SLI-4 interface spec. This
6817 * routine allocates all memory necessary to communicate
6818 * mailbox commands to the port and sets up all alignment
6819 * needs. No locks are expected to be held when calling
6820 * this routine.
6821 *
6822 * Return codes
6823 * 0 - successful
6824 * -ENOMEM - could not allocate memory.
6825 **/
6826 static int
6827 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
6828 {
6829 uint32_t bmbx_size;
6830 struct lpfc_dmabuf *dmabuf;
6831 struct dma_address *dma_address;
6832 uint32_t pa_addr;
6833 uint64_t phys_addr;
6834
6835 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
6836 if (!dmabuf)
6837 return -ENOMEM;
6838
6839 /*
6840 * The bootstrap mailbox region is comprised of 2 parts
6841 * plus an alignment restriction of 16 bytes.
6842 */
6843 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
6844 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size,
6845 &dmabuf->phys, GFP_KERNEL);
6846 if (!dmabuf->virt) {
6847 kfree(dmabuf);
6848 return -ENOMEM;
6849 }
6850
6851 /*
6852 * Initialize the bootstrap mailbox pointers now so that the register
6853 * operations are simple later. The mailbox dma address is required
6854 * to be 16-byte aligned. Also align the virtual memory as each
6855 * mailbox is copied into the bmbx mailbox region before issuing the
6856 * command to the port.
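 *
 * Alignment sketch (editorial, with hypothetical addresses): if
 * dmabuf->virt came back at ...0x1008, PTR_ALIGN(virt, 16) yields
 * ...0x1010 and ALIGN(phys, 16) rounds the DMA address up the same way;
 * the extra LPFC_ALIGN_16_BYTE - 1 bytes allocated above guarantee the
 * aligned region still fits inside the allocation.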
6857 */
6858 phba->sli4_hba.bmbx.dmabuf = dmabuf;
6859 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
6860
6861 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
6862 LPFC_ALIGN_16_BYTE);
6863 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
6864 LPFC_ALIGN_16_BYTE);
6865
6866 /*
6867 * Set the high and low physical addresses now. The SLI4 alignment
6868 * requirement is 16 bytes and the mailbox is posted to the port
6869 * as two 30-bit addresses. The other data is a bit marking whether
6870 * the 30-bit address is the high or low address.
6871 * Upcast bmbx aphys to 64bits so shift instruction compiles
6872 * clean on 32 bit machines.
6873 */
6874 dma_address = &phba->sli4_hba.bmbx.dma_address;
6875 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
6876 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
6877 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
6878 LPFC_BMBX_BIT1_ADDR_HI);
6879
6880 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
6881 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
6882 LPFC_BMBX_BIT1_ADDR_LO);
6883 return 0;
6884 }
6885
6886 /**
6887 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
6888 * @phba: pointer to lpfc hba data structure.
6889 *
6890 * This routine is invoked to teardown the bootstrap mailbox
6891 * region and release all host resources. This routine requires
6892 * the caller to ensure all mailbox commands have been recovered, that no
6893 * additional mailbox commands are sent, and that interrupts are disabled
6894 * before calling this routine.
6895 *
6896 **/
6897 static void
6898 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
6899 {
6900 dma_free_coherent(&phba->pcidev->dev,
6901 phba->sli4_hba.bmbx.bmbx_size,
6902 phba->sli4_hba.bmbx.dmabuf->virt,
6903 phba->sli4_hba.bmbx.dmabuf->phys);
6904
6905 kfree(phba->sli4_hba.bmbx.dmabuf);
6906 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
6907 }
6908
6909 /**
6910 * lpfc_sli4_read_config - Get the config parameters.
6911 * @phba: pointer to lpfc hba data structure.
6912 *
6913 * This routine is invoked to read the configuration parameters from the HBA.
6914 * The configuration parameters are used to set the base and maximum values
6915 * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
6916 * allocation for the port.
6917 *
6918 * Return codes
6919 * 0 - successful
6920 * -ENOMEM - No available memory
6921 * -EIO - The mailbox failed to complete successfully.
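 *
 * Derivation example (editorial, hypothetical reply value): with a
 * READ_CONFIG reply reporting vpi_count = 64, the code below sets
 * phba->max_vpi = 64 - 1 = 63 (presumably because one VPI is implicitly
 * consumed by the physical port) and max_vports takes the same value.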
6922 **/
6923 int
6924 lpfc_sli4_read_config(struct lpfc_hba *phba)
6925 {
6926 LPFC_MBOXQ_t *pmb;
6927 struct lpfc_mbx_read_config *rd_config;
6928 union lpfc_sli4_cfg_shdr *shdr;
6929 uint32_t shdr_status, shdr_add_status;
6930 struct lpfc_mbx_get_func_cfg *get_func_cfg;
6931 struct lpfc_rsrc_desc_fcfcoe *desc;
6932 char *pdesc_0;
6933 uint16_t forced_link_speed;
6934 uint32_t if_type;
6935 int length, i, rc = 0, rc2;
6936
6937 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6938 if (!pmb) {
6939 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6940 "2011 Unable to allocate memory for issuing "
6941 "SLI_CONFIG_SPECIAL mailbox command\n");
6942 return -ENOMEM;
6943 }
6944
6945 lpfc_read_config(phba, pmb);
6946
6947 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6948 if (rc != MBX_SUCCESS) {
6949 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6950 "2012 Mailbox failed, mbxCmd x%x "
6951 "READ_CONFIG, mbxStatus x%x\n",
6952 bf_get(lpfc_mqe_command, &pmb->u.mqe),
6953 bf_get(lpfc_mqe_status, &pmb->u.mqe));
6954 rc = -EIO;
6955 } else {
6956 rd_config = &pmb->u.mqe.un.rd_config;
6957 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
6958 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
6959 phba->sli4_hba.lnk_info.lnk_tp =
6960 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
6961 phba->sli4_hba.lnk_info.lnk_no =
6962 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
6963 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6964 "3081 lnk_type:%d, lnk_numb:%d\n",
6965 phba->sli4_hba.lnk_info.lnk_tp,
6966 phba->sli4_hba.lnk_info.lnk_no);
6967 } else
6968 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6969 "3082 Mailbox (x%x) returned ldv:x0\n",
6970 bf_get(lpfc_mqe_command, &pmb->u.mqe));
6971 phba->sli4_hba.extents_in_use =
6972 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
6973 phba->sli4_hba.max_cfg_param.max_xri =
6974 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
6975 phba->sli4_hba.max_cfg_param.xri_base =
6976 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
6977 phba->sli4_hba.max_cfg_param.max_vpi =
6978 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
6979 phba->sli4_hba.max_cfg_param.vpi_base =
6980 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
6981 phba->sli4_hba.max_cfg_param.max_rpi =
6982 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
6983 phba->sli4_hba.max_cfg_param.rpi_base =
6984 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
6985 phba->sli4_hba.max_cfg_param.max_vfi =
6986 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
6987 phba->sli4_hba.max_cfg_param.vfi_base =
6988 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
6989 phba->sli4_hba.max_cfg_param.max_fcfi =
6990 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
6991 phba->sli4_hba.max_cfg_param.max_eq =
6992 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
6993 phba->sli4_hba.max_cfg_param.max_rq =
6994 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
6995 phba->sli4_hba.max_cfg_param.max_wq =
6996 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
6997 phba->sli4_hba.max_cfg_param.max_cq =
6998 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
6999 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
7000 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
7001 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
7002 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
7003 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
7004 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
7005 phba->max_vports = phba->max_vpi;
7006 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7007 "2003 cfg params Extents?
%d " 7008 "XRI(B:%d M:%d), " 7009 "VPI(B:%d M:%d) " 7010 "VFI(B:%d M:%d) " 7011 "RPI(B:%d M:%d) " 7012 "FCFI(Count:%d)\n", 7013 phba->sli4_hba.extents_in_use, 7014 phba->sli4_hba.max_cfg_param.xri_base, 7015 phba->sli4_hba.max_cfg_param.max_xri, 7016 phba->sli4_hba.max_cfg_param.vpi_base, 7017 phba->sli4_hba.max_cfg_param.max_vpi, 7018 phba->sli4_hba.max_cfg_param.vfi_base, 7019 phba->sli4_hba.max_cfg_param.max_vfi, 7020 phba->sli4_hba.max_cfg_param.rpi_base, 7021 phba->sli4_hba.max_cfg_param.max_rpi, 7022 phba->sli4_hba.max_cfg_param.max_fcfi); 7023 } 7024 7025 if (rc) 7026 goto read_cfg_out; 7027 7028 /* Update link speed if forced link speed is supported */ 7029 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 7030 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { 7031 forced_link_speed = 7032 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config); 7033 if (forced_link_speed) { 7034 phba->hba_flag |= HBA_FORCED_LINK_SPEED; 7035 7036 switch (forced_link_speed) { 7037 case LINK_SPEED_1G: 7038 phba->cfg_link_speed = 7039 LPFC_USER_LINK_SPEED_1G; 7040 break; 7041 case LINK_SPEED_2G: 7042 phba->cfg_link_speed = 7043 LPFC_USER_LINK_SPEED_2G; 7044 break; 7045 case LINK_SPEED_4G: 7046 phba->cfg_link_speed = 7047 LPFC_USER_LINK_SPEED_4G; 7048 break; 7049 case LINK_SPEED_8G: 7050 phba->cfg_link_speed = 7051 LPFC_USER_LINK_SPEED_8G; 7052 break; 7053 case LINK_SPEED_10G: 7054 phba->cfg_link_speed = 7055 LPFC_USER_LINK_SPEED_10G; 7056 break; 7057 case LINK_SPEED_16G: 7058 phba->cfg_link_speed = 7059 LPFC_USER_LINK_SPEED_16G; 7060 break; 7061 case LINK_SPEED_32G: 7062 phba->cfg_link_speed = 7063 LPFC_USER_LINK_SPEED_32G; 7064 break; 7065 case 0xffff: 7066 phba->cfg_link_speed = 7067 LPFC_USER_LINK_SPEED_AUTO; 7068 break; 7069 default: 7070 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7071 "0047 Unrecognized link " 7072 "speed : %d\n", 7073 forced_link_speed); 7074 phba->cfg_link_speed = 7075 LPFC_USER_LINK_SPEED_AUTO; 7076 } 7077 } 7078 } 7079 7080 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 7081 length = phba->sli4_hba.max_cfg_param.max_xri - 7082 lpfc_sli4_get_els_iocb_cnt(phba); 7083 if (phba->cfg_hba_queue_depth > length) { 7084 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7085 "3361 HBA queue depth changed from %d to %d\n", 7086 phba->cfg_hba_queue_depth, length); 7087 phba->cfg_hba_queue_depth = length; 7088 } 7089 7090 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 7091 LPFC_SLI_INTF_IF_TYPE_2) 7092 goto read_cfg_out; 7093 7094 /* get the pf# and vf# for SLI4 if_type 2 port */ 7095 length = (sizeof(struct lpfc_mbx_get_func_cfg) - 7096 sizeof(struct lpfc_sli4_cfg_mhdr)); 7097 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON, 7098 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, 7099 length, LPFC_SLI4_MBX_EMBED); 7100 7101 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 7102 shdr = (union lpfc_sli4_cfg_shdr *) 7103 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; 7104 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7105 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 7106 if (rc2 || shdr_status || shdr_add_status) { 7107 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7108 "3026 Mailbox failed , mbxCmd x%x " 7109 "GET_FUNCTION_CONFIG, mbxStatus x%x\n", 7110 bf_get(lpfc_mqe_command, &pmb->u.mqe), 7111 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 7112 goto read_cfg_out; 7113 } 7114 7115 /* search for fc_fcoe resrouce descriptor */ 7116 get_func_cfg = &pmb->u.mqe.un.get_func_cfg; 7117 7118 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0]; 7119 desc = (struct 
lpfc_rsrc_desc_fcfcoe *)pdesc_0; 7120 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc); 7121 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD) 7122 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH; 7123 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH) 7124 goto read_cfg_out; 7125 7126 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) { 7127 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i); 7128 if (LPFC_RSRC_DESC_TYPE_FCFCOE == 7129 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) { 7130 phba->sli4_hba.iov.pf_number = 7131 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); 7132 phba->sli4_hba.iov.vf_number = 7133 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc); 7134 break; 7135 } 7136 } 7137 7138 if (i < LPFC_RSRC_DESC_MAX_NUM) 7139 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 7140 "3027 GET_FUNCTION_CONFIG: pf_number:%d, " 7141 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, 7142 phba->sli4_hba.iov.vf_number); 7143 else 7144 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7145 "3028 GET_FUNCTION_CONFIG: failed to find " 7146 "Resource Descriptor:x%x\n", 7147 LPFC_RSRC_DESC_TYPE_FCFCOE); 7148 7149 read_cfg_out: 7150 mempool_free(pmb, phba->mbox_mem_pool); 7151 return rc; 7152 } 7153 7154 /** 7155 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port. 7156 * @phba: pointer to lpfc hba data structure. 7157 * 7158 * This routine is invoked to setup the port-side endian order when 7159 * the port if_type is 0. This routine has no function for other 7160 * if_types. 7161 * 7162 * Return codes 7163 * 0 - successful 7164 * -ENOMEM - No available memory 7165 * -EIO - The mailbox failed to complete successfully. 7166 **/ 7167 static int 7168 lpfc_setup_endian_order(struct lpfc_hba *phba) 7169 { 7170 LPFC_MBOXQ_t *mboxq; 7171 uint32_t if_type, rc = 0; 7172 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 7173 HOST_ENDIAN_HIGH_WORD1}; 7174 7175 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 7176 switch (if_type) { 7177 case LPFC_SLI_INTF_IF_TYPE_0: 7178 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 7179 GFP_KERNEL); 7180 if (!mboxq) { 7181 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7182 "0492 Unable to allocate memory for " 7183 "issuing SLI_CONFIG_SPECIAL mailbox " 7184 "command\n"); 7185 return -ENOMEM; 7186 } 7187 7188 /* 7189 * The SLI4_CONFIG_SPECIAL mailbox command requires the first 7190 * two words to contain special data values and no other data. 7191 */ 7192 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 7193 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 7194 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7195 if (rc != MBX_SUCCESS) { 7196 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7197 "0493 SLI_CONFIG_SPECIAL mailbox " 7198 "failed with status x%x\n", 7199 rc); 7200 rc = -EIO; 7201 } 7202 mempool_free(mboxq, phba->mbox_mem_pool); 7203 break; 7204 case LPFC_SLI_INTF_IF_TYPE_2: 7205 case LPFC_SLI_INTF_IF_TYPE_1: 7206 default: 7207 break; 7208 } 7209 return rc; 7210 } 7211 7212 /** 7213 * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts 7214 * @phba: pointer to lpfc hba data structure. 7215 * 7216 * This routine is invoked to check the user-settable queue counts for EQs and 7217 * CQs. After this routine is called, the counts will be set to valid values that 7218 * adhere to the constraints of the system's interrupt vectors and the port's 7219 * queue resources. 
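 * For illustration only -- a minimal sketch of the clamping policy this
 * routine applies (local names here are hypothetical, not driver fields):
 *
 *   io_chan = min(io_chan, num_online_cpus);
 *   if (io_chan + fof_vectors > max_eq)
 *           io_chan = max_eq - fof_vectors;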
7220 * 7221 * Return codes 7222 * 0 - successful 7223 * -ENOMEM - No available memory 7224 **/ 7225 static int 7226 lpfc_sli4_queue_verify(struct lpfc_hba *phba) 7227 { 7228 int cfg_fcp_io_channel; 7229 uint32_t cpu; 7230 uint32_t i = 0; 7231 int fof_vectors = phba->cfg_fof ? 1 : 0; 7232 7233 /* 7234 * Sanity check for configured queue parameters against the run-time 7235 * device parameters 7236 */ 7237 7238 /* Sanity check on HBA EQ parameters */ 7239 cfg_fcp_io_channel = phba->cfg_fcp_io_channel; 7240 7241 /* It doesn't make sense to have more IO channels than online CPUs */ 7242 for_each_present_cpu(cpu) { 7243 if (cpu_online(cpu)) 7244 i++; 7245 } 7246 phba->sli4_hba.num_online_cpu = i; 7247 phba->sli4_hba.num_present_cpu = lpfc_present_cpu; 7248 phba->sli4_hba.curr_disp_cpu = 0; 7249 7250 if (i < cfg_fcp_io_channel) { 7251 lpfc_printf_log(phba, 7252 KERN_ERR, LOG_INIT, 7253 "3188 Reducing IO channels to match number of " 7254 "online CPUs: from %d to %d\n", 7255 cfg_fcp_io_channel, i); 7256 cfg_fcp_io_channel = i; 7257 } 7258 7259 if (cfg_fcp_io_channel + fof_vectors > 7260 phba->sli4_hba.max_cfg_param.max_eq) { 7261 if (phba->sli4_hba.max_cfg_param.max_eq < 7262 LPFC_FCP_IO_CHAN_MIN) { 7263 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7264 "2574 Not enough EQs (%d) from the " 7265 "pci function for supporting FCP " 7266 "EQs (%d)\n", 7267 phba->sli4_hba.max_cfg_param.max_eq, 7268 phba->cfg_fcp_io_channel); 7269 goto out_error; 7270 } 7271 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7272 "2575 Reducing IO channels to match number of " 7273 "available EQs: from %d to %d\n", 7274 cfg_fcp_io_channel, 7275 phba->sli4_hba.max_cfg_param.max_eq); 7276 cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq - 7277 fof_vectors; 7278 } 7279 7280 /* The actual number of FCP event queues adopted */ 7281 phba->cfg_fcp_io_channel = cfg_fcp_io_channel; 7282 7283 /* Get EQ depth from module parameter, fake the default for now */ 7284 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 7285 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 7286 7287 /* Get CQ depth from module parameter, fake the default for now */ 7288 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; 7289 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; 7290 7291 return 0; 7292 out_error: 7293 return -ENOMEM; 7294 } 7295 7296 /** 7297 * lpfc_sli4_queue_create - Create all the SLI4 queues 7298 * @phba: pointer to lpfc hba data structure. 7299 * 7300 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA 7301 * operation. For each SLI4 queue type, the parameters such as queue entry 7302 * count (queue depth) shall be taken from the module parameter. For now, 7303 * we just use some constant number as a placeholder. 7304 * 7305 * Return codes 7306 * 0 - successful 7307 * -ENOMEM - No available memory 7308 * -EIO - The mailbox failed to complete successfully. 7309 **/ 7310 int 7311 lpfc_sli4_queue_create(struct lpfc_hba *phba) 7312 { 7313 struct lpfc_queue *qdesc; 7314 uint32_t wqesize; 7315 int idx; 7316 7317 /* 7318 * Create HBA Record arrays. 
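 * Each record array is a per-channel table of struct lpfc_queue pointers,
 * sized by cfg_fcp_io_channel. A sketch of the allocation idiom repeated
 * below (illustrative only; local names are hypothetical):
 *
 *   q = kzalloc(sizeof(struct lpfc_queue *) * nchan, GFP_KERNEL);
 *   if (!q)
 *           goto out_error;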
7319 */ 7320 if (!phba->cfg_fcp_io_channel) 7321 return -ERANGE; 7322 7323 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 7324 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 7325 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 7326 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 7327 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 7328 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 7329 7330 phba->sli4_hba.hba_eq = kzalloc((sizeof(struct lpfc_queue *) * 7331 phba->cfg_fcp_io_channel), GFP_KERNEL); 7332 if (!phba->sli4_hba.hba_eq) { 7333 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7334 "2576 Failed allocate memory for " 7335 "fast-path EQ record array\n"); 7336 goto out_error; 7337 } 7338 7339 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * 7340 phba->cfg_fcp_io_channel), GFP_KERNEL); 7341 if (!phba->sli4_hba.fcp_cq) { 7342 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7343 "2577 Failed allocate memory for fast-path " 7344 "CQ record array\n"); 7345 goto out_error; 7346 } 7347 7348 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) * 7349 phba->cfg_fcp_io_channel), GFP_KERNEL); 7350 if (!phba->sli4_hba.fcp_wq) { 7351 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7352 "2578 Failed allocate memory for fast-path " 7353 "WQ record array\n"); 7354 goto out_error; 7355 } 7356 7357 /* 7358 * Since the first EQ can have multiple CQs associated with it, 7359 * this array is used to quickly see if we have a FCP fast-path 7360 * CQ match. 7361 */ 7362 phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) * 7363 phba->cfg_fcp_io_channel), GFP_KERNEL); 7364 if (!phba->sli4_hba.fcp_cq_map) { 7365 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7366 "2545 Failed allocate memory for fast-path " 7367 "CQ map\n"); 7368 goto out_error; 7369 } 7370 7371 /* 7372 * Create HBA Event Queues (EQs). The cfg_fcp_io_channel specifies 7373 * how many EQs to create. 7374 */ 7375 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { 7376 7377 /* Create EQs */ 7378 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 7379 phba->sli4_hba.eq_ecount); 7380 if (!qdesc) { 7381 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7382 "0497 Failed allocate EQ (%d)\n", idx); 7383 goto out_error; 7384 } 7385 phba->sli4_hba.hba_eq[idx] = qdesc; 7386 7387 /* Create Fast Path FCP CQs */ 7388 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 7389 phba->sli4_hba.cq_ecount); 7390 if (!qdesc) { 7391 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7392 "0499 Failed allocate fast-path FCP " 7393 "CQ (%d)\n", idx); 7394 goto out_error; 7395 } 7396 phba->sli4_hba.fcp_cq[idx] = qdesc; 7397 7398 /* Create Fast Path FCP WQs */ 7399 wqesize = (phba->fcp_embed_io) ? 
7400 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; 7401 qdesc = lpfc_sli4_queue_alloc(phba, wqesize, 7402 phba->sli4_hba.wq_ecount); 7403 if (!qdesc) { 7404 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7405 "0503 Failed allocate fast-path FCP " 7406 "WQ (%d)\n", idx); 7407 goto out_error; 7408 } 7409 phba->sli4_hba.fcp_wq[idx] = qdesc; 7410 } 7411 7412 7413 /* 7414 * Create Slow Path Completion Queues (CQs) 7415 */ 7416 7417 /* Create slow-path Mailbox Command Complete Queue */ 7418 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 7419 phba->sli4_hba.cq_ecount); 7420 if (!qdesc) { 7421 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7422 "0500 Failed allocate slow-path mailbox CQ\n"); 7423 goto out_error; 7424 } 7425 phba->sli4_hba.mbx_cq = qdesc; 7426 7427 /* Create slow-path ELS Complete Queue */ 7428 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 7429 phba->sli4_hba.cq_ecount); 7430 if (!qdesc) { 7431 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7432 "0501 Failed allocate slow-path ELS CQ\n"); 7433 goto out_error; 7434 } 7435 phba->sli4_hba.els_cq = qdesc; 7436 7437 7438 /* 7439 * Create Slow Path Work Queues (WQs) 7440 */ 7441 7442 /* Create Mailbox Command Queue */ 7443 7444 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize, 7445 phba->sli4_hba.mq_ecount); 7446 if (!qdesc) { 7447 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7448 "0505 Failed allocate slow-path MQ\n"); 7449 goto out_error; 7450 } 7451 phba->sli4_hba.mbx_wq = qdesc; 7452 7453 /* 7454 * Create ELS Work Queues 7455 */ 7456 7457 /* Create slow-path ELS Work Queue */ 7458 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 7459 phba->sli4_hba.wq_ecount); 7460 if (!qdesc) { 7461 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7462 "0504 Failed allocate slow-path ELS WQ\n"); 7463 goto out_error; 7464 } 7465 phba->sli4_hba.els_wq = qdesc; 7466 7467 /* 7468 * Create Receive Queue (RQ) 7469 */ 7470 7471 /* Create Receive Queue for header */ 7472 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 7473 phba->sli4_hba.rq_ecount); 7474 if (!qdesc) { 7475 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7476 "0506 Failed allocate receive HRQ\n"); 7477 goto out_error; 7478 } 7479 phba->sli4_hba.hdr_rq = qdesc; 7480 7481 /* Create Receive Queue for data */ 7482 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 7483 phba->sli4_hba.rq_ecount); 7484 if (!qdesc) { 7485 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7486 "0507 Failed allocate receive DRQ\n"); 7487 goto out_error; 7488 } 7489 phba->sli4_hba.dat_rq = qdesc; 7490 7491 /* Create the Queues needed for Flash Optimized Fabric operations */ 7492 if (phba->cfg_fof) 7493 lpfc_fof_queue_create(phba); 7494 return 0; 7495 7496 out_error: 7497 lpfc_sli4_queue_destroy(phba); 7498 return -ENOMEM; 7499 } 7500 7501 /** 7502 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 7503 * @phba: pointer to lpfc hba data structure. 7504 * 7505 * This routine is invoked to release all the SLI4 queues with the FCoE HBA 7506 * operation. 7507 * 7508 * Return codes 7509 * 0 - successful 7510 * -ENOMEM - No available memory 7511 * -EIO - The mailbox failed to complete successfully. 
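 *
 * Teardown mirrors creation: each per-channel array is walked and freed
 * entry by entry with lpfc_sli4_queue_free() before the array itself is
 * released with kfree(), and every pointer is reset to NULL so a repeat
 * call is harmless (a summary of the intent, not additional behavior).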
7512 **/ 7513 void 7514 lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 7515 { 7516 int idx; 7517 7518 if (phba->cfg_fof) 7519 lpfc_fof_queue_destroy(phba); 7520 7521 if (phba->sli4_hba.hba_eq != NULL) { 7522 /* Release HBA event queue */ 7523 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { 7524 if (phba->sli4_hba.hba_eq[idx] != NULL) { 7525 lpfc_sli4_queue_free( 7526 phba->sli4_hba.hba_eq[idx]); 7527 phba->sli4_hba.hba_eq[idx] = NULL; 7528 } 7529 } 7530 kfree(phba->sli4_hba.hba_eq); 7531 phba->sli4_hba.hba_eq = NULL; 7532 } 7533 7534 if (phba->sli4_hba.fcp_cq != NULL) { 7535 /* Release FCP completion queue */ 7536 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { 7537 if (phba->sli4_hba.fcp_cq[idx] != NULL) { 7538 lpfc_sli4_queue_free( 7539 phba->sli4_hba.fcp_cq[idx]); 7540 phba->sli4_hba.fcp_cq[idx] = NULL; 7541 } 7542 } 7543 kfree(phba->sli4_hba.fcp_cq); 7544 phba->sli4_hba.fcp_cq = NULL; 7545 } 7546 7547 if (phba->sli4_hba.fcp_wq != NULL) { 7548 /* Release FCP work queue */ 7549 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { 7550 if (phba->sli4_hba.fcp_wq[idx] != NULL) { 7551 lpfc_sli4_queue_free( 7552 phba->sli4_hba.fcp_wq[idx]); 7553 phba->sli4_hba.fcp_wq[idx] = NULL; 7554 } 7555 } 7556 kfree(phba->sli4_hba.fcp_wq); 7557 phba->sli4_hba.fcp_wq = NULL; 7558 } 7559 7560 /* Release FCP CQ mapping array */ 7561 if (phba->sli4_hba.fcp_cq_map != NULL) { 7562 kfree(phba->sli4_hba.fcp_cq_map); 7563 phba->sli4_hba.fcp_cq_map = NULL; 7564 } 7565 7566 /* Release mailbox command work queue */ 7567 if (phba->sli4_hba.mbx_wq != NULL) { 7568 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); 7569 phba->sli4_hba.mbx_wq = NULL; 7570 } 7571 7572 /* Release ELS work queue */ 7573 if (phba->sli4_hba.els_wq != NULL) { 7574 lpfc_sli4_queue_free(phba->sli4_hba.els_wq); 7575 phba->sli4_hba.els_wq = NULL; 7576 } 7577 7578 /* Release unsolicited receive queue */ 7579 if (phba->sli4_hba.hdr_rq != NULL) { 7580 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); 7581 phba->sli4_hba.hdr_rq = NULL; 7582 } 7583 if (phba->sli4_hba.dat_rq != NULL) { 7584 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); 7585 phba->sli4_hba.dat_rq = NULL; 7586 } 7587 7588 /* Release ELS complete queue */ 7589 if (phba->sli4_hba.els_cq != NULL) { 7590 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 7591 phba->sli4_hba.els_cq = NULL; 7592 } 7593 7594 /* Release mailbox command complete queue */ 7595 if (phba->sli4_hba.mbx_cq != NULL) { 7596 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); 7597 phba->sli4_hba.mbx_cq = NULL; 7598 } 7599 7600 return; 7601 } 7602 7603 /** 7604 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 7605 * @phba: pointer to lpfc hba data structure. 7606 * 7607 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 7608 * operation. 7609 * 7610 * Return codes 7611 * 0 - successful 7612 * -ENOMEM - No available memory 7613 * -EIO - The mailbox failed to complete successfully. 
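 *
 * Setup order matters here: EQs are brought up first, then the CQs that
 * attach to them, then the MQ/WQs/RQ that attach to the CQs. A compact
 * sketch of the dependency chain (illustrative):
 *
 *   lpfc_eq_create(eq) -> lpfc_cq_create(cq, eq) -> lpfc_wq_create(wq, cq)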
7614 **/ 7615 int 7616 lpfc_sli4_queue_setup(struct lpfc_hba *phba) 7617 { 7618 struct lpfc_sli *psli = &phba->sli; 7619 struct lpfc_sli_ring *pring; 7620 int rc = -ENOMEM; 7621 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 7622 int fcp_cq_index = 0; 7623 uint32_t shdr_status, shdr_add_status; 7624 union lpfc_sli4_cfg_shdr *shdr; 7625 LPFC_MBOXQ_t *mboxq; 7626 uint32_t length; 7627 7628 /* Check for dual-ULP support */ 7629 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7630 if (!mboxq) { 7631 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7632 "3249 Unable to allocate memory for " 7633 "QUERY_FW_CFG mailbox command\n"); 7634 return -ENOMEM; 7635 } 7636 length = (sizeof(struct lpfc_mbx_query_fw_config) - 7637 sizeof(struct lpfc_sli4_cfg_mhdr)); 7638 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 7639 LPFC_MBOX_OPCODE_QUERY_FW_CFG, 7640 length, LPFC_SLI4_MBX_EMBED); 7641 7642 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7643 7644 shdr = (union lpfc_sli4_cfg_shdr *) 7645 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 7646 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7647 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 7648 if (shdr_status || shdr_add_status || rc) { 7649 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7650 "3250 QUERY_FW_CFG mailbox failed with status " 7651 "x%x add_status x%x, mbx status x%x\n", 7652 shdr_status, shdr_add_status, rc); 7653 if (rc != MBX_TIMEOUT) 7654 mempool_free(mboxq, phba->mbox_mem_pool); 7655 rc = -ENXIO; 7656 goto out_error; 7657 } 7658 7659 phba->sli4_hba.fw_func_mode = 7660 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; 7661 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; 7662 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; 7663 phba->sli4_hba.physical_port = 7664 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port; 7665 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7666 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " 7667 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, 7668 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); 7669 7670 if (rc != MBX_TIMEOUT) 7671 mempool_free(mboxq, phba->mbox_mem_pool); 7672 7673 /* 7674 * Set up HBA Event Queues (EQs) 7675 */ 7676 7677 /* Set up HBA event queue */ 7678 if (phba->cfg_fcp_io_channel && !phba->sli4_hba.hba_eq) { 7679 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7680 "3147 Fast-path EQs not allocated\n"); 7681 rc = -ENOMEM; 7682 goto out_error; 7683 } 7684 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) { 7685 if (!phba->sli4_hba.hba_eq[fcp_eqidx]) { 7686 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7687 "0522 Fast-path EQ (%d) not " 7688 "allocated\n", fcp_eqidx); 7689 rc = -ENOMEM; 7690 goto out_destroy_hba_eq; 7691 } 7692 rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[fcp_eqidx], 7693 (phba->cfg_fcp_imax / phba->cfg_fcp_io_channel)); 7694 if (rc) { 7695 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7696 "0523 Failed setup of fast-path EQ " 7697 "(%d), rc = 0x%x\n", fcp_eqidx, 7698 (uint32_t)rc); 7699 goto out_destroy_hba_eq; 7700 } 7701 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7702 "2584 HBA EQ setup: " 7703 "queue[%d]-id=%d\n", fcp_eqidx, 7704 phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id); 7705 } 7706 7707 /* Set up fast-path FCP Response Complete Queue */ 7708 if (!phba->sli4_hba.fcp_cq) { 7709 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7710 "3148 Fast-path FCP CQ array not " 7711 "allocated\n"); 7712 rc = -ENOMEM; 7713 goto out_destroy_hba_eq; 7714 } 7715 7716 for (fcp_cqidx = 0; 
fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) { 7717 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 7718 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7719 "0526 Fast-path FCP CQ (%d) not " 7720 "allocated\n", fcp_cqidx); 7721 rc = -ENOMEM; 7722 goto out_destroy_fcp_cq; 7723 } 7724 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], 7725 phba->sli4_hba.hba_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP); 7726 if (rc) { 7727 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7728 "0527 Failed setup of fast-path FCP " 7729 "CQ (%d), rc = 0x%x\n", fcp_cqidx, 7730 (uint32_t)rc); 7731 goto out_destroy_fcp_cq; 7732 } 7733 7734 /* Setup fcp_cq_map for fast lookup */ 7735 phba->sli4_hba.fcp_cq_map[fcp_cqidx] = 7736 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id; 7737 7738 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7739 "2588 FCP CQ setup: cq[%d]-id=%d, " 7740 "parent seq[%d]-id=%d\n", 7741 fcp_cqidx, 7742 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, 7743 fcp_cqidx, 7744 phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id); 7745 } 7746 7747 /* Set up fast-path FCP Work Queue */ 7748 if (!phba->sli4_hba.fcp_wq) { 7749 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7750 "3149 Fast-path FCP WQ array not " 7751 "allocated\n"); 7752 rc = -ENOMEM; 7753 goto out_destroy_fcp_cq; 7754 } 7755 7756 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) { 7757 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { 7758 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7759 "0534 Fast-path FCP WQ (%d) not " 7760 "allocated\n", fcp_wqidx); 7761 rc = -ENOMEM; 7762 goto out_destroy_fcp_wq; 7763 } 7764 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], 7765 phba->sli4_hba.fcp_cq[fcp_wqidx], 7766 LPFC_FCP); 7767 if (rc) { 7768 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7769 "0535 Failed setup of fast-path FCP " 7770 "WQ (%d), rc = 0x%x\n", fcp_wqidx, 7771 (uint32_t)rc); 7772 goto out_destroy_fcp_wq; 7773 } 7774 7775 /* Bind this WQ to the next FCP ring */ 7776 pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx]; 7777 pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx]; 7778 phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring; 7779 7780 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7781 "2591 FCP WQ setup: wq[%d]-id=%d, " 7782 "parent cq[%d]-id=%d\n", 7783 fcp_wqidx, 7784 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, 7785 fcp_cq_index, 7786 phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id); 7787 } 7788 /* 7789 * Set up Complete Queues (CQs) 7790 */ 7791 7792 /* Set up slow-path MBOX Complete Queue as the first CQ */ 7793 if (!phba->sli4_hba.mbx_cq) { 7794 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7795 "0528 Mailbox CQ not allocated\n"); 7796 rc = -ENOMEM; 7797 goto out_destroy_fcp_wq; 7798 } 7799 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, 7800 phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX); 7801 if (rc) { 7802 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7803 "0529 Failed setup of slow-path mailbox CQ: " 7804 "rc = 0x%x\n", (uint32_t)rc); 7805 goto out_destroy_fcp_wq; 7806 } 7807 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7808 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", 7809 phba->sli4_hba.mbx_cq->queue_id, 7810 phba->sli4_hba.hba_eq[0]->queue_id); 7811 7812 /* Set up slow-path ELS Complete Queue */ 7813 if (!phba->sli4_hba.els_cq) { 7814 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7815 "0530 ELS CQ not allocated\n"); 7816 rc = -ENOMEM; 7817 goto out_destroy_mbx_cq; 7818 } 7819 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, 7820 phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS); 7821 if (rc) { 7822 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7823 
"0531 Failed setup of slow-path ELS CQ: " 7824 "rc = 0x%x\n", (uint32_t)rc); 7825 goto out_destroy_mbx_cq; 7826 } 7827 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7828 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", 7829 phba->sli4_hba.els_cq->queue_id, 7830 phba->sli4_hba.hba_eq[0]->queue_id); 7831 7832 /* 7833 * Set up all the Work Queues (WQs) 7834 */ 7835 7836 /* Set up Mailbox Command Queue */ 7837 if (!phba->sli4_hba.mbx_wq) { 7838 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7839 "0538 Slow-path MQ not allocated\n"); 7840 rc = -ENOMEM; 7841 goto out_destroy_els_cq; 7842 } 7843 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, 7844 phba->sli4_hba.mbx_cq, LPFC_MBOX); 7845 if (rc) { 7846 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7847 "0539 Failed setup of slow-path MQ: " 7848 "rc = 0x%x\n", rc); 7849 goto out_destroy_els_cq; 7850 } 7851 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7852 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 7853 phba->sli4_hba.mbx_wq->queue_id, 7854 phba->sli4_hba.mbx_cq->queue_id); 7855 7856 /* Set up slow-path ELS Work Queue */ 7857 if (!phba->sli4_hba.els_wq) { 7858 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7859 "0536 Slow-path ELS WQ not allocated\n"); 7860 rc = -ENOMEM; 7861 goto out_destroy_mbx_wq; 7862 } 7863 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq, 7864 phba->sli4_hba.els_cq, LPFC_ELS); 7865 if (rc) { 7866 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7867 "0537 Failed setup of slow-path ELS WQ: " 7868 "rc = 0x%x\n", (uint32_t)rc); 7869 goto out_destroy_mbx_wq; 7870 } 7871 7872 /* Bind this WQ to the ELS ring */ 7873 pring = &psli->ring[LPFC_ELS_RING]; 7874 pring->sli.sli4.wqp = (void *)phba->sli4_hba.els_wq; 7875 phba->sli4_hba.els_cq->pring = pring; 7876 7877 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7878 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 7879 phba->sli4_hba.els_wq->queue_id, 7880 phba->sli4_hba.els_cq->queue_id); 7881 7882 /* 7883 * Create Receive Queue (RQ) 7884 */ 7885 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 7886 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7887 "0540 Receive Queue not allocated\n"); 7888 rc = -ENOMEM; 7889 goto out_destroy_els_wq; 7890 } 7891 7892 lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ); 7893 lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ); 7894 7895 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 7896 phba->sli4_hba.els_cq, LPFC_USOL); 7897 if (rc) { 7898 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7899 "0541 Failed setup of Receive Queue: " 7900 "rc = 0x%x\n", (uint32_t)rc); 7901 goto out_destroy_fcp_wq; 7902 } 7903 7904 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7905 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 7906 "parent cq-id=%d\n", 7907 phba->sli4_hba.hdr_rq->queue_id, 7908 phba->sli4_hba.dat_rq->queue_id, 7909 phba->sli4_hba.els_cq->queue_id); 7910 7911 if (phba->cfg_fof) { 7912 rc = lpfc_fof_queue_setup(phba); 7913 if (rc) { 7914 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7915 "0549 Failed setup of FOF Queues: " 7916 "rc = 0x%x\n", rc); 7917 goto out_destroy_els_rq; 7918 } 7919 } 7920 7921 /* 7922 * Configure EQ delay multipier for interrupt coalescing using 7923 * MODIFY_EQ_DELAY for all EQs created, LPFC_MAX_EQ_DELAY at a time. 
7924 */ 7925 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; 7926 fcp_eqidx += LPFC_MAX_EQ_DELAY) 7927 lpfc_modify_fcp_eq_delay(phba, fcp_eqidx); 7928 return 0; 7929 7930 out_destroy_els_rq: 7931 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); 7932 out_destroy_els_wq: 7933 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 7934 out_destroy_mbx_wq: 7935 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 7936 out_destroy_els_cq: 7937 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 7938 out_destroy_mbx_cq: 7939 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 7940 out_destroy_fcp_wq: 7941 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) 7942 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); 7943 out_destroy_fcp_cq: 7944 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) 7945 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); 7946 out_destroy_hba_eq: 7947 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) 7948 lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_eqidx]); 7949 out_error: 7950 return rc; 7951 } 7952 7953 /** 7954 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 7955 * @phba: pointer to lpfc hba data structure. 7956 * 7957 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA 7958 * operation. 7959 * 7960 * Return codes 7961 * 0 - successful 7962 * -ENOMEM - No available memory 7963 * -EIO - The mailbox failed to complete successfully. 7964 **/ 7965 void 7966 lpfc_sli4_queue_unset(struct lpfc_hba *phba) 7967 { 7968 int fcp_qidx; 7969 7970 /* Unset the queues created for Flash Optimized Fabric operations */ 7971 if (phba->cfg_fof) 7972 lpfc_fof_queue_destroy(phba); 7973 /* Unset mailbox command work queue */ 7974 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 7975 /* Unset ELS work queue */ 7976 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 7977 /* Unset unsolicited receive queue */ 7978 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); 7979 /* Unset FCP work queue */ 7980 if (phba->sli4_hba.fcp_wq) { 7981 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel; 7982 fcp_qidx++) 7983 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]); 7984 } 7985 /* Unset mailbox command complete queue */ 7986 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 7987 /* Unset ELS complete queue */ 7988 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 7989 /* Unset FCP response complete queue */ 7990 if (phba->sli4_hba.fcp_cq) { 7991 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel; 7992 fcp_qidx++) 7993 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); 7994 } 7995 /* Unset fast-path event queue */ 7996 if (phba->sli4_hba.hba_eq) { 7997 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel; 7998 fcp_qidx++) 7999 lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_qidx]); 8000 } 8001 } 8002 8003 /** 8004 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 8005 * @phba: pointer to lpfc hba data structure. 8006 * 8007 * This routine is invoked to allocate and set up a pool of completion queue 8008 * events. The body of the completion queue event is a completion queue entry 8009 * CQE. For now, this pool is used for the interrupt service routine to queue 8010 * the following HBA completion queue events for the worker thread to process: 8011 * - Mailbox asynchronous events 8012 * - Receive queue completion unsolicited events 8013 * Later, this can be used for all the slow-path events. 
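 * The pool is a plain list-backed free list; a self-contained sketch of
 * the idiom (hypothetical type, not driver code):
 *
 *   struct ev { struct list_head list; };
 *   LIST_HEAD(pool);
 *   struct ev *e = kmalloc(sizeof(*e), GFP_KERNEL);
 *   if (e)
 *           list_add_tail(&e->list, &pool);
 *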
8014 * 8015 * Return codes 8016 * 0 - successful 8017 * -ENOMEM - No available memory 8018 **/ 8019 static int 8020 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 8021 { 8022 struct lpfc_cq_event *cq_event; 8023 int i; 8024 8025 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 8026 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 8027 if (!cq_event) 8028 goto out_pool_create_fail; 8029 list_add_tail(&cq_event->list, 8030 &phba->sli4_hba.sp_cqe_event_pool); 8031 } 8032 return 0; 8033 8034 out_pool_create_fail: 8035 lpfc_sli4_cq_event_pool_destroy(phba); 8036 return -ENOMEM; 8037 } 8038 8039 /** 8040 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 8041 * @phba: pointer to lpfc hba data structure. 8042 * 8043 * This routine is invoked to free the pool of completion queue events at 8044 * driver unload time. Note that, it is the responsibility of the driver 8045 * cleanup routine to free all the outstanding completion-queue events 8046 * allocated from this pool back into the pool before invoking this routine 8047 * to destroy the pool. 8048 **/ 8049 static void 8050 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 8051 { 8052 struct lpfc_cq_event *cq_event, *next_cq_event; 8053 8054 list_for_each_entry_safe(cq_event, next_cq_event, 8055 &phba->sli4_hba.sp_cqe_event_pool, list) { 8056 list_del(&cq_event->list); 8057 kfree(cq_event); 8058 } 8059 } 8060 8061 /** 8062 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 8063 * @phba: pointer to lpfc hba data structure. 8064 * 8065 * This routine is the lock free version of the API invoked to allocate a 8066 * completion-queue event from the free pool. 8067 * 8068 * Return: Pointer to the newly allocated completion-queue event if successful 8069 * NULL otherwise. 8070 **/ 8071 struct lpfc_cq_event * 8072 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 8073 { 8074 struct lpfc_cq_event *cq_event = NULL; 8075 8076 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 8077 struct lpfc_cq_event, list); 8078 return cq_event; 8079 } 8080 8081 /** 8082 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 8083 * @phba: pointer to lpfc hba data structure. 8084 * 8085 * This routine is the lock version of the API invoked to allocate a 8086 * completion-queue event from the free pool. 8087 * 8088 * Return: Pointer to the newly allocated completion-queue event if successful 8089 * NULL otherwise. 8090 **/ 8091 struct lpfc_cq_event * 8092 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 8093 { 8094 struct lpfc_cq_event *cq_event; 8095 unsigned long iflags; 8096 8097 spin_lock_irqsave(&phba->hbalock, iflags); 8098 cq_event = __lpfc_sli4_cq_event_alloc(phba); 8099 spin_unlock_irqrestore(&phba->hbalock, iflags); 8100 return cq_event; 8101 } 8102 8103 /** 8104 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 8105 * @phba: pointer to lpfc hba data structure. 8106 * @cq_event: pointer to the completion queue event to be freed. 8107 * 8108 * This routine is the lock free version of the API invoked to release a 8109 * completion-queue event back into the free pool. 8110 **/ 8111 void 8112 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 8113 struct lpfc_cq_event *cq_event) 8114 { 8115 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); 8116 } 8117 8118 /** 8119 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 8120 * @phba: pointer to lpfc hba data structure. 
8121 * @cq_event: pointer to the completion queue event to be freed. 8122 * 8123 * This routine is the lock version of the API invoked to release a 8124 * completion-queue event back into the free pool. 8125 **/ 8126 void 8127 lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 8128 struct lpfc_cq_event *cq_event) 8129 { 8130 unsigned long iflags; 8131 spin_lock_irqsave(&phba->hbalock, iflags); 8132 __lpfc_sli4_cq_event_release(phba, cq_event); 8133 spin_unlock_irqrestore(&phba->hbalock, iflags); 8134 } 8135 8136 /** 8137 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool 8138 * @phba: pointer to lpfc hba data structure. 8139 * 8140 * This routine is invoked to free all the pending completion-queue events 8141 * back into the free pool for device reset. 8142 **/ 8143 static void 8144 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) 8145 { 8146 LIST_HEAD(cqelist); 8147 struct lpfc_cq_event *cqe; 8148 unsigned long iflags; 8149 8150 /* Retrieve all the pending WCQEs from pending WCQE lists */ 8151 spin_lock_irqsave(&phba->hbalock, iflags); 8152 /* Pending FCP XRI abort events */ 8153 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, 8154 &cqelist); 8155 /* Pending ELS XRI abort events */ 8156 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 8157 &cqelist); 8158 /* Pending async events */ 8159 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, 8160 &cqelist); 8161 spin_unlock_irqrestore(&phba->hbalock, iflags); 8162 8163 while (!list_empty(&cqelist)) { 8164 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list); 8165 lpfc_sli4_cq_event_release(phba, cqe); 8166 } 8167 } 8168 8169 /** 8170 * lpfc_pci_function_reset - Reset PCI function. 8171 * @phba: pointer to lpfc hba data structure. 8172 * 8173 * This routine is invoked to request a PCI function reset. It destroys 8174 * all resources assigned to the PCI function that originated this request. 8175 * 8176 * Return codes 8177 * 0 - successful 8178 * -ENOMEM - No available memory 8179 * -EIO - The mailbox failed to complete successfully. 
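 *
 * For if_type 2 ports the reset is driven through the SLIPORT control
 * register, and completion is detected by polling the port status
 * register; a sketch of the poll loop used below (1500 iterations of
 * msleep(20) gives the 30 second budget):
 *
 *   for (i = 0; i < 1500; i++) {
 *           if (bf_get(lpfc_sliport_status_rdy, &reg_data))
 *                   break;
 *           msleep(20);
 *   }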
8180 **/ 8181 int 8182 lpfc_pci_function_reset(struct lpfc_hba *phba) 8183 { 8184 LPFC_MBOXQ_t *mboxq; 8185 uint32_t rc = 0, if_type; 8186 uint32_t shdr_status, shdr_add_status; 8187 uint32_t rdy_chk; 8188 uint32_t port_reset = 0; 8189 union lpfc_sli4_cfg_shdr *shdr; 8190 struct lpfc_register reg_data; 8191 uint16_t devid; 8192 8193 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8194 switch (if_type) { 8195 case LPFC_SLI_INTF_IF_TYPE_0: 8196 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 8197 GFP_KERNEL); 8198 if (!mboxq) { 8199 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8200 "0494 Unable to allocate memory for " 8201 "issuing SLI_FUNCTION_RESET mailbox " 8202 "command\n"); 8203 return -ENOMEM; 8204 } 8205 8206 /* Setup PCI function reset mailbox-ioctl command */ 8207 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 8208 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 8209 LPFC_SLI4_MBX_EMBED); 8210 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8211 shdr = (union lpfc_sli4_cfg_shdr *) 8212 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 8213 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 8214 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 8215 &shdr->response); 8216 if (rc != MBX_TIMEOUT) 8217 mempool_free(mboxq, phba->mbox_mem_pool); 8218 if (shdr_status || shdr_add_status || rc) { 8219 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8220 "0495 SLI_FUNCTION_RESET mailbox " 8221 "failed with status x%x add_status x%x," 8222 " mbx status x%x\n", 8223 shdr_status, shdr_add_status, rc); 8224 rc = -ENXIO; 8225 } 8226 break; 8227 case LPFC_SLI_INTF_IF_TYPE_2: 8228 wait: 8229 /* 8230 * Poll the Port Status Register and wait for RDY for 8231 * up to 30 seconds. If the port doesn't respond, treat 8232 * it as an error. 8233 */ 8234 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) { 8235 if (lpfc_readl(phba->sli4_hba.u.if_type2. 8236 STATUSregaddr, &reg_data.word0)) { 8237 rc = -ENODEV; 8238 goto out; 8239 } 8240 if (bf_get(lpfc_sliport_status_rdy, &reg_data)) 8241 break; 8242 msleep(20); 8243 } 8244 8245 if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) { 8246 phba->work_status[0] = readl( 8247 phba->sli4_hba.u.if_type2.ERR1regaddr); 8248 phba->work_status[1] = readl( 8249 phba->sli4_hba.u.if_type2.ERR2regaddr); 8250 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8251 "2890 Port not ready, port status reg " 8252 "0x%x error 1=0x%x, error 2=0x%x\n", 8253 reg_data.word0, 8254 phba->work_status[0], 8255 phba->work_status[1]); 8256 rc = -ENODEV; 8257 goto out; 8258 } 8259 8260 if (!port_reset) { 8261 /* 8262 * Reset the port now 8263 */ 8264 reg_data.word0 = 0; 8265 bf_set(lpfc_sliport_ctrl_end, &reg_data, 8266 LPFC_SLIPORT_LITTLE_ENDIAN); 8267 bf_set(lpfc_sliport_ctrl_ip, &reg_data, 8268 LPFC_SLIPORT_INIT_PORT); 8269 writel(reg_data.word0, phba->sli4_hba.u.if_type2. 8270 CTRLregaddr); 8271 /* flush */ 8272 pci_read_config_word(phba->pcidev, 8273 PCI_DEVICE_ID, &devid); 8274 8275 port_reset = 1; 8276 msleep(20); 8277 goto wait; 8278 } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) { 8279 rc = -ENODEV; 8280 goto out; 8281 } 8282 break; 8283 8284 case LPFC_SLI_INTF_IF_TYPE_1: 8285 default: 8286 break; 8287 } 8288 8289 out: 8290 /* Catch the not-ready port failure after a port reset. */ 8291 if (rc) { 8292 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8293 "3317 HBA not functional: IP Reset Failed " 8294 "try: echo fw_reset > board_mode\n"); 8295 rc = -ENODEV; 8296 } 8297 8298 return rc; 8299 } 8300 8301 /** 8302 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 
8303 * @phba: pointer to lpfc hba data structure. 8304 * 8305 * This routine is invoked to set up the PCI device memory space for device 8306 * with SLI-4 interface spec. 8307 * 8308 * Return codes 8309 * 0 - successful 8310 * other values - error 8311 **/ 8312 static int 8313 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 8314 { 8315 struct pci_dev *pdev; 8316 unsigned long bar0map_len, bar1map_len, bar2map_len; 8317 int error = -ENODEV; 8318 uint32_t if_type; 8319 8320 /* Obtain PCI device reference */ 8321 if (!phba->pcidev) 8322 return error; 8323 else 8324 pdev = phba->pcidev; 8325 8326 /* Set the device DMA mask size */ 8327 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 8328 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 8329 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 8330 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 8331 return error; 8332 } 8333 } 8334 8335 /* 8336 * The BARs and register set definitions and offset locations are 8337 * dependent on the if_type. 8338 */ 8339 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, 8340 &phba->sli4_hba.sli_intf.word0)) { 8341 return error; 8342 } 8343 8344 /* There is no SLI3 failback for SLI4 devices. */ 8345 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 8346 LPFC_SLI_INTF_VALID) { 8347 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8348 "2894 SLI_INTF reg contents invalid " 8349 "sli_intf reg 0x%x\n", 8350 phba->sli4_hba.sli_intf.word0); 8351 return error; 8352 } 8353 8354 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8355 /* 8356 * Get the bus address of SLI4 device Bar regions and the 8357 * number of bytes required by each mapping. The mapping of the 8358 * particular PCI BARs regions is dependent on the type of 8359 * SLI4 device. 8360 */ 8361 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { 8362 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); 8363 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); 8364 8365 /* 8366 * Map SLI4 PCI Config Space Register base to a kernel virtual 8367 * addr 8368 */ 8369 phba->sli4_hba.conf_regs_memmap_p = 8370 ioremap(phba->pci_bar0_map, bar0map_len); 8371 if (!phba->sli4_hba.conf_regs_memmap_p) { 8372 dev_printk(KERN_ERR, &pdev->dev, 8373 "ioremap failed for SLI4 PCI config " 8374 "registers.\n"); 8375 goto out; 8376 } 8377 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; 8378 /* Set up BAR0 PCI config space register memory map */ 8379 lpfc_sli4_bar0_register_memmap(phba, if_type); 8380 } else { 8381 phba->pci_bar0_map = pci_resource_start(pdev, 1); 8382 bar0map_len = pci_resource_len(pdev, 1); 8383 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { 8384 dev_printk(KERN_ERR, &pdev->dev, 8385 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); 8386 goto out; 8387 } 8388 phba->sli4_hba.conf_regs_memmap_p = 8389 ioremap(phba->pci_bar0_map, bar0map_len); 8390 if (!phba->sli4_hba.conf_regs_memmap_p) { 8391 dev_printk(KERN_ERR, &pdev->dev, 8392 "ioremap failed for SLI4 PCI config " 8393 "registers.\n"); 8394 goto out; 8395 } 8396 lpfc_sli4_bar0_register_memmap(phba, if_type); 8397 } 8398 8399 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && 8400 (pci_resource_start(pdev, PCI_64BIT_BAR2))) { 8401 /* 8402 * Map SLI4 if type 0 HBA Control Register base to a kernel 8403 * virtual address and setup the registers. 
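 * A sketch of the BAR-mapping idiom this routine repeats for each
 * region (illustrative only; the unwind label is hypothetical):
 *
 *   start = pci_resource_start(pdev, bar);
 *   len   = pci_resource_len(pdev, bar);
 *   vaddr = ioremap(start, len);
 *   if (!vaddr)
 *           goto unwind;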
8404 */ 8405 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); 8406 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 8407 phba->sli4_hba.ctrl_regs_memmap_p = 8408 ioremap(phba->pci_bar1_map, bar1map_len); 8409 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 8410 dev_printk(KERN_ERR, &pdev->dev, 8411 "ioremap failed for SLI4 HBA control registers.\n"); 8412 goto out_iounmap_conf; 8413 } 8414 phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p; 8415 lpfc_sli4_bar1_register_memmap(phba); 8416 } 8417 8418 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && 8419 (pci_resource_start(pdev, PCI_64BIT_BAR4))) { 8420 /* 8421 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel 8422 * virtual address and setup the registers. 8423 */ 8424 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); 8425 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 8426 phba->sli4_hba.drbl_regs_memmap_p = 8427 ioremap(phba->pci_bar2_map, bar2map_len); 8428 if (!phba->sli4_hba.drbl_regs_memmap_p) { 8429 dev_printk(KERN_ERR, &pdev->dev, 8430 "ioremap failed for SLI4 HBA doorbell registers.\n"); 8431 goto out_iounmap_ctrl; 8432 } 8433 phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; 8434 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 8435 if (error) 8436 goto out_iounmap_all; 8437 } 8438 8439 return 0; 8440 8441 out_iounmap_all: 8442 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 8443 out_iounmap_ctrl: 8444 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 8445 out_iounmap_conf: 8446 iounmap(phba->sli4_hba.conf_regs_memmap_p); 8447 out: 8448 return error; 8449 } 8450 8451 /** 8452 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 8453 * @phba: pointer to lpfc hba data structure. 8454 * 8455 * This routine is invoked to unset the PCI device memory space for device 8456 * with SLI-4 interface spec. 8457 **/ 8458 static void 8459 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 8460 { 8461 uint32_t if_type; 8462 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8463 8464 switch (if_type) { 8465 case LPFC_SLI_INTF_IF_TYPE_0: 8466 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 8467 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 8468 iounmap(phba->sli4_hba.conf_regs_memmap_p); 8469 break; 8470 case LPFC_SLI_INTF_IF_TYPE_2: 8471 iounmap(phba->sli4_hba.conf_regs_memmap_p); 8472 break; 8473 case LPFC_SLI_INTF_IF_TYPE_1: 8474 default: 8475 dev_printk(KERN_ERR, &phba->pcidev->dev, 8476 "FATAL - unsupported SLI4 interface type - %d\n", 8477 if_type); 8478 break; 8479 } 8480 } 8481 8482 /** 8483 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 8484 * @phba: pointer to lpfc hba data structure. 8485 * 8486 * This routine is invoked to enable the MSI-X interrupt vectors to device 8487 * with SLI-3 interface specs. The kernel function pci_enable_msix_exact() 8488 * is called to enable the MSI-X vectors. Note that pci_enable_msix_exact(), 8489 * once invoked, enables either all or nothing, depending on the current 8490 * availability of PCI vector resources. The device driver is responsible 8491 * for calling the individual request_irq() to register each MSI-X vector 8492 * with an interrupt handler, which is done in this function. Note that 8493 * later when the device is unloading, the driver should always call free_irq() 8494 * on all MSI-X vectors it has done request_irq() on before calling 8495 * pci_disable_msix(). Failure to do so results in a BUG_ON() and the device 8496 * will be left with MSI-X enabled and its vectors leaked. 
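 * A sketch of the enable/teardown pairing this implies (illustrative):
 *
 *   pci_enable_msix_exact(pdev, entries, n);
 *   request_irq(entries[i].vector, handler, 0, name, phba);
 *   ...
 *   free_irq(entries[i].vector, phba);   on every requested vector, then
 *   pci_disable_msix(pdev);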
8497 * 8498 * Return codes 8499 * 0 - successful 8500 * other values - error 8501 **/ 8502 static int 8503 lpfc_sli_enable_msix(struct lpfc_hba *phba) 8504 { 8505 int rc, i; 8506 LPFC_MBOXQ_t *pmb; 8507 8508 /* Set up MSI-X multi-message vectors */ 8509 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 8510 phba->msix_entries[i].entry = i; 8511 8512 /* Configure MSI-X capability structure */ 8513 rc = pci_enable_msix_exact(phba->pcidev, phba->msix_entries, 8514 LPFC_MSIX_VECTORS); 8515 if (rc) { 8516 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8517 "0420 PCI enable MSI-X failed (%d)\n", rc); 8518 goto vec_fail_out; 8519 } 8520 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 8521 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8522 "0477 MSI-X entry[%d]: vector=x%x " 8523 "message=%d\n", i, 8524 phba->msix_entries[i].vector, 8525 phba->msix_entries[i].entry); 8526 /* 8527 * Assign MSI-X vectors to interrupt handlers 8528 */ 8529 8530 /* vector-0 is associated to slow-path handler */ 8531 rc = request_irq(phba->msix_entries[0].vector, 8532 &lpfc_sli_sp_intr_handler, 0, 8533 LPFC_SP_DRIVER_HANDLER_NAME, phba); 8534 if (rc) { 8535 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8536 "0421 MSI-X slow-path request_irq failed " 8537 "(%d)\n", rc); 8538 goto msi_fail_out; 8539 } 8540 8541 /* vector-1 is associated to fast-path handler */ 8542 rc = request_irq(phba->msix_entries[1].vector, 8543 &lpfc_sli_fp_intr_handler, 0, 8544 LPFC_FP_DRIVER_HANDLER_NAME, phba); 8545 8546 if (rc) { 8547 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8548 "0429 MSI-X fast-path request_irq failed " 8549 "(%d)\n", rc); 8550 goto irq_fail_out; 8551 } 8552 8553 /* 8554 * Configure HBA MSI-X attention conditions to messages 8555 */ 8556 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 8557 8558 if (!pmb) { 8559 rc = -ENOMEM; 8560 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8561 "0474 Unable to allocate memory for issuing " 8562 "MBOX_CONFIG_MSI command\n"); 8563 goto mem_fail_out; 8564 } 8565 rc = lpfc_config_msi(phba, pmb); 8566 if (rc) 8567 goto mbx_fail_out; 8568 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 8569 if (rc != MBX_SUCCESS) { 8570 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 8571 "0351 Config MSI mailbox command failed, " 8572 "mbxCmd x%x, mbxStatus x%x\n", 8573 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 8574 goto mbx_fail_out; 8575 } 8576 8577 /* Free memory allocated for mailbox command */ 8578 mempool_free(pmb, phba->mbox_mem_pool); 8579 return rc; 8580 8581 mbx_fail_out: 8582 /* Free memory allocated for mailbox command */ 8583 mempool_free(pmb, phba->mbox_mem_pool); 8584 8585 mem_fail_out: 8586 /* free the irq already requested */ 8587 free_irq(phba->msix_entries[1].vector, phba); 8588 8589 irq_fail_out: 8590 /* free the irq already requested */ 8591 free_irq(phba->msix_entries[0].vector, phba); 8592 8593 msi_fail_out: 8594 /* Unconfigure MSI-X capability structure */ 8595 pci_disable_msix(phba->pcidev); 8596 8597 vec_fail_out: 8598 return rc; 8599 } 8600 8601 /** 8602 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device. 8603 * @phba: pointer to lpfc hba data structure. 8604 * 8605 * This routine is invoked to release the MSI-X vectors and then disable the 8606 * MSI-X interrupt mode to device with SLI-3 interface spec. 
8607 **/ 8608 static void 8609 lpfc_sli_disable_msix(struct lpfc_hba *phba) 8610 { 8611 int i; 8612 8613 /* Free up MSI-X multi-message vectors */ 8614 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 8615 free_irq(phba->msix_entries[i].vector, phba); 8616 /* Disable MSI-X */ 8617 pci_disable_msix(phba->pcidev); 8618 8619 return; 8620 } 8621 8622 /** 8623 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device. 8624 * @phba: pointer to lpfc hba data structure. 8625 * 8626 * This routine is invoked to enable the MSI interrupt mode to device with 8627 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to 8628 * enable the MSI vector. The device driver is responsible for calling 8629 * request_irq() to register the MSI vector with an interrupt handler, which 8630 * is done in this function. 8631 * 8632 * Return codes 8633 * 0 - successful 8634 * other values - error 8635 */ 8636 static int 8637 lpfc_sli_enable_msi(struct lpfc_hba *phba) 8638 { 8639 int rc; 8640 8641 rc = pci_enable_msi(phba->pcidev); 8642 if (!rc) 8643 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8644 "0462 PCI enable MSI mode success.\n"); 8645 else { 8646 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8647 "0471 PCI enable MSI mode failed (%d)\n", rc); 8648 return rc; 8649 } 8650 8651 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 8652 0, LPFC_DRIVER_NAME, phba); 8653 if (rc) { 8654 pci_disable_msi(phba->pcidev); 8655 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8656 "0478 MSI request_irq failed (%d)\n", rc); 8657 } 8658 return rc; 8659 } 8660 8661 /** 8662 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device. 8663 * @phba: pointer to lpfc hba data structure. 8664 * 8665 * This routine is invoked to disable the MSI interrupt mode to device with 8666 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it has 8667 * done request_irq() on before calling pci_disable_msi(). Failure to do so 8668 * results in a BUG_ON() and the device will be left with MSI enabled and its 8669 * vector leaked. 8670 */ 8671 static void 8672 lpfc_sli_disable_msi(struct lpfc_hba *phba) 8673 { 8674 free_irq(phba->pcidev->irq, phba); 8675 pci_disable_msi(phba->pcidev); 8676 return; 8677 } 8678 8679 /** 8680 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. 8681 * @phba: pointer to lpfc hba data structure. 8682 * 8683 * This routine is invoked to enable device interrupt and associate the 8684 * driver's interrupt handler(s) to interrupt vector(s) to device with SLI-3 8685 * interface spec. Depending on the interrupt mode configured for the driver, 8686 * the driver will try to fall back from the configured interrupt mode to an 8687 * interrupt mode which is supported by the platform, kernel, and device in 8688 * the order of: 8689 * MSI-X -> MSI -> IRQ. 
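 *
 * A compact sketch of the cascade implemented below (illustrative):
 *
 *   if (cfg_mode == 2 && lpfc_sli_enable_msix(phba) == 0)
 *           use MSI-X;
 *   else if (cfg_mode >= 1 && lpfc_sli_enable_msi(phba) == 0)
 *           use MSI;
 *   else if (request_irq(pdev->irq, handler, IRQF_SHARED, name, phba) == 0)
 *           use INTx;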
8690 * 8691 * Return codes 8692 * 0 - successful 8693 * other values - error 8694 **/ 8695 static uint32_t 8696 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 8697 { 8698 uint32_t intr_mode = LPFC_INTR_ERROR; 8699 int retval; 8700 8701 if (cfg_mode == 2) { 8702 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 8703 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 8704 if (!retval) { 8705 /* Now, try to enable MSI-X interrupt mode */ 8706 retval = lpfc_sli_enable_msix(phba); 8707 if (!retval) { 8708 /* Indicate initialization to MSI-X mode */ 8709 phba->intr_type = MSIX; 8710 intr_mode = 2; 8711 } 8712 } 8713 } 8714 8715 /* Fallback to MSI if MSI-X initialization failed */ 8716 if (cfg_mode >= 1 && phba->intr_type == NONE) { 8717 retval = lpfc_sli_enable_msi(phba); 8718 if (!retval) { 8719 /* Indicate initialization to MSI mode */ 8720 phba->intr_type = MSI; 8721 intr_mode = 1; 8722 } 8723 } 8724 8725 /* Fallback to INTx if both MSI-X/MSI initialization failed */ 8726 if (phba->intr_type == NONE) { 8727 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 8728 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 8729 if (!retval) { 8730 /* Indicate initialization to INTx mode */ 8731 phba->intr_type = INTx; 8732 intr_mode = 0; 8733 } 8734 } 8735 return intr_mode; 8736 } 8737 8738 /** 8739 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 8740 * @phba: pointer to lpfc hba data structure. 8741 * 8742 * This routine is invoked to disable device interrupt and disassociate the 8743 * driver's interrupt handler(s) from interrupt vector(s) to device with 8744 * SLI-3 interface spec. Depending on the interrupt mode, the driver will 8745 * release the interrupt vector(s) for the message signaled interrupt. 8746 **/ 8747 static void 8748 lpfc_sli_disable_intr(struct lpfc_hba *phba) 8749 { 8750 /* Disable the currently initialized interrupt mode */ 8751 if (phba->intr_type == MSIX) 8752 lpfc_sli_disable_msix(phba); 8753 else if (phba->intr_type == MSI) 8754 lpfc_sli_disable_msi(phba); 8755 else if (phba->intr_type == INTx) 8756 free_irq(phba->pcidev->irq, phba); 8757 8758 /* Reset interrupt management states */ 8759 phba->intr_type = NONE; 8760 phba->sli.slistat.sli_intr = 0; 8761 8762 return; 8763 } 8764 8765 /** 8766 * lpfc_find_next_cpu - Find next available CPU that matches the phys_id 8767 * @phba: pointer to lpfc hba data structure. 8768 * 8769 * Find next available CPU to use for IRQ to CPU affinity. 8770 */ 8771 static int 8772 lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id) 8773 { 8774 struct lpfc_vector_map_info *cpup; 8775 int cpu; 8776 8777 cpup = phba->sli4_hba.cpu_map; 8778 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { 8779 /* CPU must be online */ 8780 if (cpu_online(cpu)) { 8781 if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) && 8782 (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) && 8783 (cpup->phys_id == phys_id)) { 8784 return cpu; 8785 } 8786 } 8787 cpup++; 8788 } 8789 8790 /* 8791 * If we get here, we have used ALL CPUs for the specific 8792 * phys_id. Now we need to clear out lpfc_used_cpu and start 8793 * reusing CPUs. 
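 *
 * In other words, the search is two-pass (a summary, not new behavior):
 * pass 1 wants a CPU that is online, has no IRQ assigned, is not marked
 * in lpfc_used_cpu, and matches phys_id; pass 2 clears the lpfc_used_cpu
 * markers for this phys_id and retries without the "unused" requirement.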
8794 */ 8795 8796 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { 8797 if (lpfc_used_cpu[cpu] == phys_id) 8798 lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY; 8799 } 8800 8801 cpup = phba->sli4_hba.cpu_map; 8802 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { 8803 /* CPU must be online */ 8804 if (cpu_online(cpu)) { 8805 if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) && 8806 (cpup->phys_id == phys_id)) { 8807 return cpu; 8808 } 8809 } 8810 cpup++; 8811 } 8812 return LPFC_VECTOR_MAP_EMPTY; 8813 } 8814 8815 /** 8816 * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors 8817 * @phba: pointer to lpfc hba data structure. 8818 * @vectors: number of HBA vectors 8819 * 8820 * Affinitize MSI-X IRQ vectors to CPUs. Try to spread vector affinity 8821 * equally across multiple physical CPUs (NUMA nodes). 8822 * In addition, this routine will assign an IO channel for each CPU 8823 * to use when issuing I/Os. 8824 */ 8825 static int 8826 lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors) 8827 { 8828 int i, idx, saved_chann, used_chann, cpu, phys_id; 8829 int max_phys_id, min_phys_id; 8830 int num_io_channel, first_cpu, chan; 8831 struct lpfc_vector_map_info *cpup; 8832 #ifdef CONFIG_X86 8833 struct cpuinfo_x86 *cpuinfo; 8834 #endif 8835 uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1]; 8836 8837 /* If there is no mapping, just return */ 8838 if (!phba->cfg_fcp_cpu_map) 8839 return 1; 8840 8841 /* Init cpu_map array */ 8842 memset(phba->sli4_hba.cpu_map, 0xff, 8843 (sizeof(struct lpfc_vector_map_info) * 8844 phba->sli4_hba.num_present_cpu)); 8845 8846 max_phys_id = 0; 8847 min_phys_id = 0xff; 8848 phys_id = 0; 8849 num_io_channel = 0; 8850 first_cpu = LPFC_VECTOR_MAP_EMPTY; 8851 8852 /* Update CPU map with physical id and core id of each CPU */ 8853 cpup = phba->sli4_hba.cpu_map; 8854 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { 8855 #ifdef CONFIG_X86 8856 cpuinfo = &cpu_data(cpu); 8857 cpup->phys_id = cpuinfo->phys_proc_id; 8858 cpup->core_id = cpuinfo->cpu_core_id; 8859 #else 8860 /* No distinction between CPUs for other platforms */ 8861 cpup->phys_id = 0; 8862 cpup->core_id = 0; 8863 #endif 8864 8865 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8866 "3328 CPU physid %d coreid %d\n", 8867 cpup->phys_id, cpup->core_id); 8868 8869 if (cpup->phys_id > max_phys_id) 8870 max_phys_id = cpup->phys_id; 8871 if (cpup->phys_id < min_phys_id) 8872 min_phys_id = cpup->phys_id; 8873 cpup++; 8874 } 8875 8876 phys_id = min_phys_id; 8877 /* Now associate the HBA vectors with specific CPUs */ 8878 for (idx = 0; idx < vectors; idx++) { 8879 cpup = phba->sli4_hba.cpu_map; 8880 cpu = lpfc_find_next_cpu(phba, phys_id); 8881 if (cpu == LPFC_VECTOR_MAP_EMPTY) { 8882 8883 /* Try all phys_ids */ 8884 for (i = 1; i < max_phys_id; i++) { 8885 phys_id++; 8886 if (phys_id > max_phys_id) 8887 phys_id = min_phys_id; 8888 cpu = lpfc_find_next_cpu(phba, phys_id); 8889 if (cpu == LPFC_VECTOR_MAP_EMPTY) 8890 continue; 8891 goto found; 8892 } 8893 8894 /* Use round robin for scheduling */ 8895 phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN; 8896 chan = 0; 8897 cpup = phba->sli4_hba.cpu_map; 8898 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 8899 cpup->channel_id = chan; 8900 cpup++; 8901 chan++; 8902 if (chan >= phba->cfg_fcp_io_channel) 8903 chan = 0; 8904 } 8905 8906 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8907 "3329 Cannot set affinity:" 8908 "Error mapping vector %d (%d)\n", 8909 idx, vectors); 8910 return 0; 8911 } 8912 found: 8913 cpup += cpu; 8914 if (phba->cfg_fcp_cpu_map 
== LPFC_DRIVER_CPU_MAP) 8915 lpfc_used_cpu[cpu] = phys_id; 8916 8917 /* Associate vector with selected CPU */ 8918 cpup->irq = phba->sli4_hba.msix_entries[idx].vector; 8919 8920 /* Associate IO channel with selected CPU */ 8921 cpup->channel_id = idx; 8922 num_io_channel++; 8923 8924 if (first_cpu == LPFC_VECTOR_MAP_EMPTY) 8925 first_cpu = cpu; 8926 8927 /* Now affinitize to the selected CPU */ 8928 i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx]. 8929 vector, get_cpu_mask(cpu)); 8930 8931 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8932 "3330 Set Affinity: CPU %d channel %d " 8933 "irq %d (%x)\n", 8934 cpu, cpup->channel_id, 8935 phba->sli4_hba.msix_entries[idx].vector, i); 8936 8937 /* Spread vector mapping across multiple physical CPU nodes */ 8938 phys_id++; 8939 if (phys_id > max_phys_id) 8940 phys_id = min_phys_id; 8941 } 8942 8943 /* 8944 * Finally fill in the IO channel for any remaining CPUs. 8945 * At this point, all IO channels have been assigned to a specific 8946 * MSIx vector, mapped to a specific CPU. 8947 * Base the remaining IO channel assignments on the IO channels 8948 * already assigned to other CPUs on the same phys_id. 8949 */ 8950 for (i = min_phys_id; i <= max_phys_id; i++) { 8951 /* 8952 * If there are no io channels already mapped to 8953 * this phys_id, just round robin through the io_channels. 8954 * Setup chann[] for round robin. 8955 */ 8956 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) 8957 chann[idx] = idx; 8958 8959 saved_chann = 0; 8960 used_chann = 0; 8961 8962 /* 8963 * First build a list of IO channels already assigned 8964 * to this phys_id before reassigning the same IO 8965 * channels to the remaining CPUs. 8966 */ 8967 cpup = phba->sli4_hba.cpu_map; 8968 cpu = first_cpu; 8969 cpup += cpu; 8970 for (idx = 0; idx < phba->sli4_hba.num_present_cpu; 8971 idx++) { 8972 if (cpup->phys_id == i) { 8973 /* 8974 * Save any IO channels that are 8975 * already mapped to this phys_id.
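 *
 * For example (hypothetical numbers): if the CPUs on this phys_id
 * already carry channels 2, 5, 0 and 1, then chann[] becomes
 * {2, 5, 0, 1} and every remaining CPU on the same phys_id is
 * handed one of those four channels in rotation.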
8976 */ 8977 if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) { 8978 if (saved_chann <= 8979 LPFC_FCP_IO_CHAN_MAX) { 8980 chann[saved_chann] = 8981 cpup->channel_id; 8982 saved_chann++; 8983 } 8984 goto out; 8985 } 8986 8987 /* See if we are using round-robin */ 8988 if (saved_chann == 0) 8989 saved_chann = 8990 phba->cfg_fcp_io_channel; 8991 8992 /* Associate next IO channel with CPU */ 8993 cpup->channel_id = chann[used_chann]; 8994 num_io_channel++; 8995 used_chann++; 8996 if (used_chann == saved_chann) 8997 used_chann = 0; 8998 8999 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9000 "3331 Set IO_CHANN " 9001 "CPU %d channel %d\n", 9002 idx, cpup->channel_id); 9003 } 9004 out: 9005 cpu++; 9006 if (cpu >= phba->sli4_hba.num_present_cpu) { 9007 cpup = phba->sli4_hba.cpu_map; 9008 cpu = 0; 9009 } else { 9010 cpup++; 9011 } 9012 } 9013 } 9014 9015 if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) { 9016 cpup = phba->sli4_hba.cpu_map; 9017 for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) { 9018 if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) { 9019 cpup->channel_id = 0; 9020 num_io_channel++; 9021 9022 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9023 "3332 Assign IO_CHANN " 9024 "CPU %d channel %d\n", 9025 idx, cpup->channel_id); 9026 } 9027 cpup++; 9028 } 9029 } 9030 9031 /* Sanity check */ 9032 if (num_io_channel != phba->sli4_hba.num_present_cpu) 9033 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9034 "3333 Set affinity mismatch: " 9035 "%d chann != %d cpus: %d vectors\n", 9036 num_io_channel, phba->sli4_hba.num_present_cpu, 9037 vectors); 9038 9039 /* Enable using cpu affinity for scheduling */ 9040 phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU; 9041 return 1; 9042 } 9043 9044 9045 /** 9046 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 9047 * @phba: pointer to lpfc hba data structure. 9048 * 9049 * This routine is invoked to enable the MSI-X interrupt vectors to device 9050 * with SLI-4 interface spec. The kernel function pci_enable_msix_range() 9051 * is called to enable the MSI-X vectors. The device driver is responsible 9052 * for calling the individual request_irq() to register each MSI-X vector 9053 * with an interrupt handler, which is done in this function. Note that 9054 * later, when the device is unloading, the driver should always call 9055 * free_irq() on all MSI-X vectors it has done request_irq() on before 9056 * calling pci_disable_msix(). Failure to do so results in a BUG_ON() and 9057 * the device will be left with MSI-X enabled, leaking its vectors.
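 *
 * The setup pattern, sketched (illustrative only; error handling and
 * driver specifics elided; pdev, entries, nvec, handler, name and arg
 * are placeholder names):
 *
 *	for (i = 0; i < nvec; i++)
 *		entries[i].entry = i;
 *	rc = pci_enable_msix_range(pdev, entries, 2, nvec);
 *	if (rc < 0)
 *		return rc;	- fewer than 2 vectors were available
 *	nvec = rc;		- may be fewer than requested
 *	for (i = 0; i < nvec; i++)
 *		request_irq(entries[i].vector, handler, 0, name, arg);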
9058 * 9059 * Return codes 9060 * 0 - successful 9061 * other values - error 9062 **/ 9063 static int 9064 lpfc_sli4_enable_msix(struct lpfc_hba *phba) 9065 { 9066 int vectors, rc, index; 9067 9068 /* Set up MSI-X multi-message vectors */ 9069 for (index = 0; index < phba->cfg_fcp_io_channel; index++) 9070 phba->sli4_hba.msix_entries[index].entry = index; 9071 9072 /* Configure MSI-X capability structure */ 9073 vectors = phba->cfg_fcp_io_channel; 9074 if (phba->cfg_fof) { 9075 phba->sli4_hba.msix_entries[index].entry = index; 9076 vectors++; 9077 } 9078 rc = pci_enable_msix_range(phba->pcidev, phba->sli4_hba.msix_entries, 9079 2, vectors); 9080 if (rc < 0) { 9081 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9082 "0484 PCI enable MSI-X failed (%d)\n", rc); 9083 goto vec_fail_out; 9084 } 9085 vectors = rc; 9086 9087 /* Log MSI-X vector assignment */ 9088 for (index = 0; index < vectors; index++) 9089 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9090 "0489 MSI-X entry[%d]: vector=x%x " 9091 "message=%d\n", index, 9092 phba->sli4_hba.msix_entries[index].vector, 9093 phba->sli4_hba.msix_entries[index].entry); 9094 9095 /* Assign MSI-X vectors to interrupt handlers */ 9096 for (index = 0; index < vectors; index++) { 9097 memset(&phba->sli4_hba.handler_name[index], 0, 16); 9098 snprintf((char *)&phba->sli4_hba.handler_name[index], 9099 LPFC_SLI4_HANDLER_NAME_SZ, 9100 LPFC_DRIVER_HANDLER_NAME"%d", index); 9101 9102 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 9103 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 9104 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1); 9105 if (phba->cfg_fof && (index == (vectors - 1))) 9106 rc = request_irq( 9107 phba->sli4_hba.msix_entries[index].vector, 9108 &lpfc_sli4_fof_intr_handler, 0, 9109 (char *)&phba->sli4_hba.handler_name[index], 9110 &phba->sli4_hba.fcp_eq_hdl[index]); 9111 else 9112 rc = request_irq( 9113 phba->sli4_hba.msix_entries[index].vector, 9114 &lpfc_sli4_hba_intr_handler, 0, 9115 (char *)&phba->sli4_hba.handler_name[index], 9116 &phba->sli4_hba.fcp_eq_hdl[index]); 9117 if (rc) { 9118 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 9119 "0486 MSI-X fast-path (%d) " 9120 "request_irq failed (%d)\n", index, rc); 9121 goto cfg_fail_out; 9122 } 9123 } 9124 9125 if (phba->cfg_fof) 9126 vectors--; 9127 9128 if (vectors != phba->cfg_fcp_io_channel) { 9129 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9130 "3238 Reducing IO channels to match number of " 9131 "MSI-X vectors, requested %d got %d\n", 9132 phba->cfg_fcp_io_channel, vectors); 9133 phba->cfg_fcp_io_channel = vectors; 9134 } 9135 9136 if (!shost_use_blk_mq(lpfc_shost_from_vport(phba->pport))) 9137 lpfc_sli4_set_affinity(phba, vectors); 9138 return rc; 9139 9140 cfg_fail_out: 9141 /* free the irq already requested */ 9142 for (--index; index >= 0; index--) { 9143 irq_set_affinity_hint(phba->sli4_hba.msix_entries[index]. 9144 vector, NULL); 9145 free_irq(phba->sli4_hba.msix_entries[index].vector, 9146 &phba->sli4_hba.fcp_eq_hdl[index]); 9147 } 9148 9149 /* Unconfigure MSI-X capability structure */ 9150 pci_disable_msix(phba->pcidev); 9151 9152 vec_fail_out: 9153 return rc; 9154 } 9155 9156 /** 9157 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device 9158 * @phba: pointer to lpfc hba data structure. 9159 * 9160 * This routine is invoked to release the MSI-X vectors and then disable the 9161 * MSI-X interrupt mode to device with SLI-4 interface spec. 
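 *
 * Teardown mirrors setup: each vector that was request_irq()'d is freed
 * first, then the capability is disabled, in outline (sketch only):
 *
 *	for (i = 0; i < nvec; i++)
 *		free_irq(entries[i].vector, arg);
 *	pci_disable_msix(pdev);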
9162 **/ 9163 static void 9164 lpfc_sli4_disable_msix(struct lpfc_hba *phba) 9165 { 9166 int index; 9167 9168 /* Free up MSI-X multi-message vectors */ 9169 for (index = 0; index < phba->cfg_fcp_io_channel; index++) { 9170 irq_set_affinity_hint(phba->sli4_hba.msix_entries[index]. 9171 vector, NULL); 9172 free_irq(phba->sli4_hba.msix_entries[index].vector, 9173 &phba->sli4_hba.fcp_eq_hdl[index]); 9174 } 9175 if (phba->cfg_fof) { 9176 free_irq(phba->sli4_hba.msix_entries[index].vector, 9177 &phba->sli4_hba.fcp_eq_hdl[index]); 9178 } 9179 /* Disable MSI-X */ 9180 pci_disable_msix(phba->pcidev); 9181 9182 return; 9183 } 9184 9185 /** 9186 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device 9187 * @phba: pointer to lpfc hba data structure. 9188 * 9189 * This routine is invoked to enable the MSI interrupt mode to device with 9190 * SLI-4 interface spec. The kernel function pci_enable_msi() is called 9191 * to enable the MSI vector. The device driver is responsible for calling 9192 * request_irq() to register the MSI vector with an interrupt handler, 9193 * which is done in this function. 9194 * 9195 * Return codes 9196 * 0 - successful 9197 * other values - error 9198 **/ 9199 static int 9200 lpfc_sli4_enable_msi(struct lpfc_hba *phba) 9201 { 9202 int rc, index; 9203 9204 rc = pci_enable_msi(phba->pcidev); 9205 if (!rc) 9206 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9207 "0487 PCI enable MSI mode success.\n"); 9208 else { 9209 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9210 "0488 PCI enable MSI mode failed (%d)\n", rc); 9211 return rc; 9212 } 9213 9214 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 9215 0, LPFC_DRIVER_NAME, phba); 9216 if (rc) { 9217 pci_disable_msi(phba->pcidev); 9218 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 9219 "0490 MSI request_irq failed (%d)\n", rc); 9220 return rc; 9221 } 9222 9223 for (index = 0; index < phba->cfg_fcp_io_channel; index++) { 9224 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 9225 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 9226 } 9227 9228 if (phba->cfg_fof) { 9229 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 9230 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 9231 } 9232 return 0; 9233 } 9234 9235 /** 9236 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device 9237 * @phba: pointer to lpfc hba data structure. 9238 * 9239 * This routine is invoked to disable the MSI interrupt mode to device with 9240 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it 9241 * has done request_irq() on before calling pci_disable_msi(). Failure to 9242 * do so results in a BUG_ON() and the device will be left with MSI enabled, 9243 * leaking its vector. 9244 **/ 9245 static void 9246 lpfc_sli4_disable_msi(struct lpfc_hba *phba) 9247 { 9248 free_irq(phba->pcidev->irq, phba); 9249 pci_disable_msi(phba->pcidev); 9250 return; 9251 } 9252 9253 /** 9254 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device 9255 * @phba: pointer to lpfc hba data structure. 9256 * 9257 * This routine is invoked to enable device interrupt and associate driver's 9258 * interrupt handler(s) to interrupt vector(s) to device with SLI-4 9259 * interface spec. Depending on the interrupt mode configured for the 9260 * driver, the driver will try to fall back from the configured interrupt 9261 * mode to an interrupt mode which is supported by the platform, kernel, 9262 * and device, in the order: 9263 * MSI-X -> MSI -> IRQ.
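 *
 * In outline, the fallback ladder is (illustrative sketch only):
 *
 *	if (cfg_mode == 2 && !lpfc_sli4_enable_msix(phba))
 *		intr_mode = 2;		- MSI-X
 *	else if (cfg_mode >= 1 && !lpfc_sli4_enable_msi(phba))
 *		intr_mode = 1;		- MSI
 *	else if (!request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
 *			      IRQF_SHARED, LPFC_DRIVER_NAME, phba))
 *		intr_mode = 0;		- INTx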
9264 * 9265 * Return codes 9266 * 0, 1, 2 - the enabled interrupt mode (INTx, MSI, MSI-X) 9267 * LPFC_INTR_ERROR - failed to enable the interrupt 9268 **/ 9269 static uint32_t 9270 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 9271 { 9272 uint32_t intr_mode = LPFC_INTR_ERROR; 9273 int retval, index; 9274 9275 if (cfg_mode == 2) { 9276 /* Preparation before conf_msi mbox cmd */ 9277 retval = 0; 9278 if (!retval) { 9279 /* Now, try to enable MSI-X interrupt mode */ 9280 retval = lpfc_sli4_enable_msix(phba); 9281 if (!retval) { 9282 /* Indicate initialization to MSI-X mode */ 9283 phba->intr_type = MSIX; 9284 intr_mode = 2; 9285 } 9286 } 9287 } 9288 9289 /* Fall back to MSI if MSI-X initialization failed */ 9290 if (cfg_mode >= 1 && phba->intr_type == NONE) { 9291 retval = lpfc_sli4_enable_msi(phba); 9292 if (!retval) { 9293 /* Indicate initialization to MSI mode */ 9294 phba->intr_type = MSI; 9295 intr_mode = 1; 9296 } 9297 } 9298 9299 /* Fall back to INTx if both MSI-X and MSI initialization failed */ 9300 if (phba->intr_type == NONE) { 9301 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 9302 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 9303 if (!retval) { 9304 /* Indicate initialization to INTx mode */ 9305 phba->intr_type = INTx; 9306 intr_mode = 0; 9307 for (index = 0; index < phba->cfg_fcp_io_channel; 9308 index++) { 9309 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 9310 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 9311 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index]. 9312 fcp_eq_in_use, 1); 9313 } 9314 if (phba->cfg_fof) { 9315 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 9316 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 9317 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index]. 9318 fcp_eq_in_use, 1); 9319 } 9320 } 9321 } 9322 return intr_mode; 9323 } 9324 9325 /** 9326 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device 9327 * @phba: pointer to lpfc hba data structure. 9328 * 9329 * This routine is invoked to disable device interrupt and disassociate 9330 * the driver's interrupt handler(s) from interrupt vector(s) to device 9331 * with SLI-4 interface spec. Depending on the interrupt mode, the driver 9332 * will release the interrupt vector(s) for the message signaled interrupt. 9333 **/ 9334 static void 9335 lpfc_sli4_disable_intr(struct lpfc_hba *phba) 9336 { 9337 /* Disable the currently initialized interrupt mode */ 9338 if (phba->intr_type == MSIX) 9339 lpfc_sli4_disable_msix(phba); 9340 else if (phba->intr_type == MSI) 9341 lpfc_sli4_disable_msi(phba); 9342 else if (phba->intr_type == INTx) 9343 free_irq(phba->pcidev->irq, phba); 9344 9345 /* Reset interrupt management states */ 9346 phba->intr_type = NONE; 9347 phba->sli.slistat.sli_intr = 0; 9348 9349 return; 9350 } 9351 9352 /** 9353 * lpfc_unset_hba - Unset SLI3 hba device initialization 9354 * @phba: pointer to lpfc hba data structure. 9355 * 9356 * This routine is invoked to unset the HBA device initialization steps to 9357 * a device with SLI-3 interface spec.
9358 **/ 9359 static void 9360 lpfc_unset_hba(struct lpfc_hba *phba) 9361 { 9362 struct lpfc_vport *vport = phba->pport; 9363 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9364 9365 spin_lock_irq(shost->host_lock); 9366 vport->load_flag |= FC_UNLOADING; 9367 spin_unlock_irq(shost->host_lock); 9368 9369 kfree(phba->vpi_bmask); 9370 kfree(phba->vpi_ids); 9371 9372 lpfc_stop_hba_timers(phba); 9373 9374 phba->pport->work_port_events = 0; 9375 9376 lpfc_sli_hba_down(phba); 9377 9378 lpfc_sli_brdrestart(phba); 9379 9380 lpfc_sli_disable_intr(phba); 9381 9382 return; 9383 } 9384 9385 /** 9386 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy 9387 * @phba: Pointer to HBA context object. 9388 * 9389 * This function is called in the SLI4 code path to wait for completion 9390 * of the device's outstanding XRI exchange busy conditions. It checks the 9391 * XRI exchange busy state of outstanding FCP and ELS I/Os every 10ms for 9392 * up to 10 seconds; after that, it checks every 30 seconds, logs an error 9393 * message, and waits forever. Only when all XRI exchange busy conditions 9394 * have completed will the driver unload proceed, invoking the function 9395 * reset mailbox command to the CNA and releasing the rest of the driver 9396 * unload resources. 9397 **/ 9398 static void 9399 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) 9400 { 9401 int wait_time = 0; 9402 int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); 9403 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 9404 9405 while (!fcp_xri_cmpl || !els_xri_cmpl) { 9406 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { 9407 if (!fcp_xri_cmpl) 9408 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9409 "2877 FCP XRI exchange busy " 9410 "wait time: %d seconds.\n", 9411 wait_time/1000); 9412 if (!els_xri_cmpl) 9413 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9414 "2878 ELS XRI exchange busy " 9415 "wait time: %d seconds.\n", 9416 wait_time/1000); 9417 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2); 9418 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2; 9419 } else { 9420 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); 9421 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; 9422 } 9423 fcp_xri_cmpl = 9424 list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); 9425 els_xri_cmpl = 9426 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 9427 } 9428 } 9429 9430 /** 9431 * lpfc_sli4_hba_unset - Unset the fcoe hba 9432 * @phba: Pointer to HBA context object. 9433 * 9434 * This function is called in the SLI4 code path to reset the HBA's FCoE 9435 * function. The caller is not required to hold any lock. This routine 9436 * issues the PCI function reset mailbox command to reset the FCoE function. 9437 * At the end of the function, it calls lpfc_hba_down_post function to 9438 * free any pending commands. 9439 **/ 9440 static void 9441 lpfc_sli4_hba_unset(struct lpfc_hba *phba) 9442 { 9443 int wait_cnt = 0; 9444 LPFC_MBOXQ_t *mboxq; 9445 struct pci_dev *pdev = phba->pcidev; 9446 9447 lpfc_stop_hba_timers(phba); 9448 phba->sli4_hba.intr_enable = 0; 9449 9450 /* 9451 * Gracefully wait out the potential current outstanding asynchronous 9452 * mailbox command.
9453 */ 9454 9455 /* First, block any pending async mailbox command from posted */ 9456 spin_lock_irq(&phba->hbalock); 9457 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 9458 spin_unlock_irq(&phba->hbalock); 9459 /* Now, trying to wait it out if we can */ 9460 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 9461 msleep(10); 9462 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) 9463 break; 9464 } 9465 /* Forcefully release the outstanding mailbox command if timed out */ 9466 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 9467 spin_lock_irq(&phba->hbalock); 9468 mboxq = phba->sli.mbox_active; 9469 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 9470 __lpfc_mbox_cmpl_put(phba, mboxq); 9471 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 9472 phba->sli.mbox_active = NULL; 9473 spin_unlock_irq(&phba->hbalock); 9474 } 9475 9476 /* Abort all iocbs associated with the hba */ 9477 lpfc_sli_hba_iocb_abort(phba); 9478 9479 /* Wait for completion of device XRI exchange busy */ 9480 lpfc_sli4_xri_exchange_busy_wait(phba); 9481 9482 /* Disable PCI subsystem interrupt */ 9483 lpfc_sli4_disable_intr(phba); 9484 9485 /* Disable SR-IOV if enabled */ 9486 if (phba->cfg_sriov_nr_virtfn) 9487 pci_disable_sriov(pdev); 9488 9489 /* Stop kthread signal shall trigger work_done one more time */ 9490 kthread_stop(phba->worker_thread); 9491 9492 /* Reset SLI4 HBA FCoE function */ 9493 lpfc_pci_function_reset(phba); 9494 lpfc_sli4_queue_destroy(phba); 9495 9496 /* Stop the SLI4 device port */ 9497 phba->pport->work_port_events = 0; 9498 } 9499 9500 /** 9501 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities. 9502 * @phba: Pointer to HBA context object. 9503 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 9504 * 9505 * This function is called in the SLI4 code path to read the port's 9506 * sli4 capabilities. 9507 * 9508 * This function may be be called from any context that can block-wait 9509 * for the completion. The expectation is that this routine is called 9510 * typically from probe_one or from the online routine. 
9511 **/ 9512 int 9513 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 9514 { 9515 int rc; 9516 struct lpfc_mqe *mqe; 9517 struct lpfc_pc_sli4_params *sli4_params; 9518 uint32_t mbox_tmo; 9519 9520 rc = 0; 9521 mqe = &mboxq->u.mqe; 9522 9523 /* Read the port's SLI4 Parameters port capabilities */ 9524 lpfc_pc_sli4_params(mboxq); 9525 if (!phba->sli4_hba.intr_enable) 9526 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 9527 else { 9528 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 9529 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 9530 } 9531 9532 if (unlikely(rc)) 9533 return 1; 9534 9535 sli4_params = &phba->sli4_hba.pc_sli4_params; 9536 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params); 9537 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params); 9538 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params); 9539 sli4_params->featurelevel_1 = bf_get(featurelevel_1, 9540 &mqe->un.sli4_params); 9541 sli4_params->featurelevel_2 = bf_get(featurelevel_2, 9542 &mqe->un.sli4_params); 9543 sli4_params->proto_types = mqe->un.sli4_params.word3; 9544 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len; 9545 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params); 9546 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params); 9547 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params); 9548 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params); 9549 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params); 9550 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params); 9551 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params); 9552 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params); 9553 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params); 9554 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params); 9555 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params); 9556 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params); 9557 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params); 9558 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params); 9559 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params); 9560 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params); 9561 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params); 9562 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params); 9563 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params); 9564 9565 /* Make sure that sge_supp_len can be handled by the driver */ 9566 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 9567 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 9568 9569 return rc; 9570 } 9571 9572 /** 9573 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS. 9574 * @phba: Pointer to HBA context object. 9575 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 9576 * 9577 * This function is called in the SLI4 code path to read the port's 9578 * sli4 capabilities. 9579 * 9580 * This function may be called from any context that can block-wait 9581 * for the completion. The expectation is that this routine is called 9582 * typically from probe_one or from the online routine.
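 *
 * Most of the fields below are unpacked with the driver's bf_get()
 * accessor, which extracts a named bit-field through its generated
 * _SHIFT/_MASK pair, roughly (sketch of the lpfc_hw4.h definition):
 *
 *	#define bf_get(name, ptr) \
 *		(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)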
9583 **/ 9584 int 9585 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 9586 { 9587 int rc; 9588 struct lpfc_mqe *mqe = &mboxq->u.mqe; 9589 struct lpfc_pc_sli4_params *sli4_params; 9590 uint32_t mbox_tmo; 9591 int length; 9592 struct lpfc_sli4_parameters *mbx_sli4_parameters; 9593 9594 /* 9595 * By default, the driver assumes the SLI4 port requires RPI 9596 * header postings. The SLI4_PARAM response will correct this 9597 * assumption. 9598 */ 9599 phba->sli4_hba.rpi_hdrs_in_use = 1; 9600 9601 /* Read the port's SLI4 Config Parameters */ 9602 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 9603 sizeof(struct lpfc_sli4_cfg_mhdr)); 9604 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 9605 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 9606 length, LPFC_SLI4_MBX_EMBED); 9607 if (!phba->sli4_hba.intr_enable) 9608 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 9609 else { 9610 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 9611 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 9612 } 9613 if (unlikely(rc)) 9614 return rc; 9615 sli4_params = &phba->sli4_hba.pc_sli4_params; 9616 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 9617 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters); 9618 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters); 9619 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters); 9620 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1, 9621 mbx_sli4_parameters); 9622 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2, 9623 mbx_sli4_parameters); 9624 if (bf_get(cfg_phwq, mbx_sli4_parameters)) 9625 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED; 9626 else 9627 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; 9628 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; 9629 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters); 9630 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters); 9631 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); 9632 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); 9633 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); 9634 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); 9635 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); 9636 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, 9637 mbx_sli4_parameters); 9638 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, 9639 mbx_sli4_parameters); 9640 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); 9641 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters); 9642 9643 /* Make sure that sge_supp_len can be handled by the driver */ 9644 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 9645 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 9646 9647 /* 9648 * Issue IOs with CDB embedded in the WQE to minimize the number 9649 * of DMAs the firmware has to do. Setting this to 1 also forces 9650 * the driver to use 128-byte WQEs for FCP IOs. 9651 */ 9652 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters)) 9653 phba->fcp_embed_io = 1; 9654 else 9655 phba->fcp_embed_io = 0; 9656 9657 /* 9658 * Check if the SLI port supports MDS Diagnostics 9659 */ 9660 if (bf_get(cfg_mds_diags, mbx_sli4_parameters)) 9661 phba->mds_diags_support = 1; 9662 else 9663 phba->mds_diags_support = 0; 9664 return 0; 9665 } 9666 9667 /** 9668 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
9669 * @pdev: pointer to PCI device 9670 * @pid: pointer to PCI device identifier 9671 * 9672 * This routine is to be called to attach a device with SLI-3 interface spec 9673 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 9674 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 9675 * information of the device and driver to see if the driver state that it can 9676 * support this kind of device. If the match is successful, the driver core 9677 * invokes this routine. If this routine determines it can claim the HBA, it 9678 * does all the initialization that it needs to do to handle the HBA properly. 9679 * 9680 * Return code 9681 * 0 - driver can claim the device 9682 * negative value - driver can not claim the device 9683 **/ 9684 static int 9685 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 9686 { 9687 struct lpfc_hba *phba; 9688 struct lpfc_vport *vport = NULL; 9689 struct Scsi_Host *shost = NULL; 9690 int error; 9691 uint32_t cfg_mode, intr_mode; 9692 9693 /* Allocate memory for HBA structure */ 9694 phba = lpfc_hba_alloc(pdev); 9695 if (!phba) 9696 return -ENOMEM; 9697 9698 /* Perform generic PCI device enabling operation */ 9699 error = lpfc_enable_pci_dev(phba); 9700 if (error) 9701 goto out_free_phba; 9702 9703 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 9704 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 9705 if (error) 9706 goto out_disable_pci_dev; 9707 9708 /* Set up SLI-3 specific device PCI memory space */ 9709 error = lpfc_sli_pci_mem_setup(phba); 9710 if (error) { 9711 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9712 "1402 Failed to set up pci memory space.\n"); 9713 goto out_disable_pci_dev; 9714 } 9715 9716 /* Set up phase-1 common device driver resources */ 9717 error = lpfc_setup_driver_resource_phase1(phba); 9718 if (error) { 9719 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9720 "1403 Failed to set up driver resource.\n"); 9721 goto out_unset_pci_mem_s3; 9722 } 9723 9724 /* Set up SLI-3 specific device driver resources */ 9725 error = lpfc_sli_driver_resource_setup(phba); 9726 if (error) { 9727 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9728 "1404 Failed to set up driver resource.\n"); 9729 goto out_unset_pci_mem_s3; 9730 } 9731 9732 /* Initialize and populate the iocb list per host */ 9733 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 9734 if (error) { 9735 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9736 "1405 Failed to initialize iocb list.\n"); 9737 goto out_unset_driver_resource_s3; 9738 } 9739 9740 /* Set up common device driver resources */ 9741 error = lpfc_setup_driver_resource_phase2(phba); 9742 if (error) { 9743 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9744 "1406 Failed to set up driver resource.\n"); 9745 goto out_free_iocb_list; 9746 } 9747 9748 /* Get the default values for Model Name and Description */ 9749 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 9750 9751 /* Create SCSI host to the physical port */ 9752 error = lpfc_create_shost(phba); 9753 if (error) { 9754 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9755 "1407 Failed to create scsi host.\n"); 9756 goto out_unset_driver_resource; 9757 } 9758 9759 /* Configure sysfs attributes */ 9760 vport = phba->pport; 9761 error = lpfc_alloc_sysfs_attr(vport); 9762 if (error) { 9763 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9764 "1476 Failed to allocate sysfs attr\n"); 9765 goto out_destroy_shost; 9766 } 9767 9768 shost = lpfc_shost_from_vport(vport); /* save shost for error 
cleanup */ 9769 /* Now, try to enable interrupt and bring up the device */ 9770 cfg_mode = phba->cfg_use_msi; 9771 while (true) { 9772 /* Put device to a known state before enabling interrupt */ 9773 lpfc_stop_port(phba); 9774 /* Configure and enable interrupt */ 9775 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 9776 if (intr_mode == LPFC_INTR_ERROR) { 9777 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9778 "0431 Failed to enable interrupt.\n"); 9779 error = -ENODEV; 9780 goto out_free_sysfs_attr; 9781 } 9782 /* SLI-3 HBA setup */ 9783 if (lpfc_sli_hba_setup(phba)) { 9784 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9785 "1477 Failed to set up hba\n"); 9786 error = -ENODEV; 9787 goto out_remove_device; 9788 } 9789 9790 /* Wait 50ms for the interrupts of previous mailbox commands */ 9791 msleep(50); 9792 /* Check active interrupts on message signaled interrupts */ 9793 if (intr_mode == 0 || 9794 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 9795 /* Log the current active interrupt mode */ 9796 phba->intr_mode = intr_mode; 9797 lpfc_log_intr_mode(phba, intr_mode); 9798 break; 9799 } else { 9800 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9801 "0447 Configure interrupt mode (%d) " 9802 "failed active interrupt test.\n", 9803 intr_mode); 9804 /* Disable the current interrupt mode */ 9805 lpfc_sli_disable_intr(phba); 9806 /* Try next level of interrupt mode */ 9807 cfg_mode = --intr_mode; 9808 } 9809 } 9810 9811 /* Perform post initialization setup */ 9812 lpfc_post_init_setup(phba); 9813 9814 /* Check if there are static vports to be created. */ 9815 lpfc_create_static_vport(phba); 9816 9817 return 0; 9818 9819 out_remove_device: 9820 lpfc_unset_hba(phba); 9821 out_free_sysfs_attr: 9822 lpfc_free_sysfs_attr(vport); 9823 out_destroy_shost: 9824 lpfc_destroy_shost(phba); 9825 out_unset_driver_resource: 9826 lpfc_unset_driver_resource_phase2(phba); 9827 out_free_iocb_list: 9828 lpfc_free_iocb_list(phba); 9829 out_unset_driver_resource_s3: 9830 lpfc_sli_driver_resource_unset(phba); 9831 out_unset_pci_mem_s3: 9832 lpfc_sli_pci_mem_unset(phba); 9833 out_disable_pci_dev: 9834 lpfc_disable_pci_dev(phba); 9835 if (shost) 9836 scsi_host_put(shost); 9837 out_free_phba: 9838 lpfc_hba_free(phba); 9839 return error; 9840 } 9841 9842 /** 9843 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. 9844 * @pdev: pointer to PCI device 9845 * 9846 * This routine is to be called to detach a device with SLI-3 interface 9847 * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec 9848 * is removed from the PCI bus, it performs all the necessary cleanup for the 9849 * HBA device to be removed from the PCI subsystem properly.
9850 **/ 9851 static void 9852 lpfc_pci_remove_one_s3(struct pci_dev *pdev) 9853 { 9854 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9855 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 9856 struct lpfc_vport **vports; 9857 struct lpfc_hba *phba = vport->phba; 9858 int i; 9859 9860 spin_lock_irq(&phba->hbalock); 9861 vport->load_flag |= FC_UNLOADING; 9862 spin_unlock_irq(&phba->hbalock); 9863 9864 lpfc_free_sysfs_attr(vport); 9865 9866 /* Release all the vports against this physical port */ 9867 vports = lpfc_create_vport_work_array(phba); 9868 if (vports != NULL) 9869 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 9870 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 9871 continue; 9872 fc_vport_terminate(vports[i]->fc_vport); 9873 } 9874 lpfc_destroy_vport_work_array(phba, vports); 9875 9876 /* Remove FC host and then SCSI host with the physical port */ 9877 fc_remove_host(shost); 9878 scsi_remove_host(shost); 9879 lpfc_cleanup(vport); 9880 9881 /* 9882 * Bring down the SLI Layer. This step disables all interrupts, 9883 * clears the rings, discards all mailbox commands, and resets 9884 * the HBA. 9885 */ 9886 9887 /* HBA interrupt will be disabled after this call */ 9888 lpfc_sli_hba_down(phba); 9889 /* Stop kthread signal shall trigger work_done one more time */ 9890 kthread_stop(phba->worker_thread); 9891 /* Final cleanup of txcmplq and reset the HBA */ 9892 lpfc_sli_brdrestart(phba); 9893 9894 kfree(phba->vpi_bmask); 9895 kfree(phba->vpi_ids); 9896 9897 lpfc_stop_hba_timers(phba); 9898 spin_lock_irq(&phba->hbalock); 9899 list_del_init(&vport->listentry); 9900 spin_unlock_irq(&phba->hbalock); 9901 9902 lpfc_debugfs_terminate(vport); 9903 9904 /* Disable SR-IOV if enabled */ 9905 if (phba->cfg_sriov_nr_virtfn) 9906 pci_disable_sriov(pdev); 9907 9908 /* Disable interrupt */ 9909 lpfc_sli_disable_intr(phba); 9910 9911 scsi_host_put(shost); 9912 9913 /* 9914 * Call scsi_free before mem_free since scsi bufs are released to their 9915 * corresponding pools here. 9916 */ 9917 lpfc_scsi_free(phba); 9918 lpfc_mem_free_all(phba); 9919 9920 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 9921 phba->hbqslimp.virt, phba->hbqslimp.phys); 9922 9923 /* Free resources associated with SLI2 interface */ 9924 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 9925 phba->slim2p.virt, phba->slim2p.phys); 9926 9927 /* unmap adapter SLIM and Control Registers */ 9928 iounmap(phba->ctrl_regs_memmap_p); 9929 iounmap(phba->slim_memmap_p); 9930 9931 lpfc_hba_free(phba); 9932 9933 pci_release_mem_regions(pdev); 9934 pci_disable_device(pdev); 9935 } 9936 9937 /** 9938 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 9939 * @pdev: pointer to PCI device 9940 * @msg: power management message 9941 * 9942 * This routine is to be called from the kernel's PCI subsystem to support 9943 * system Power Management (PM) to device with SLI-3 interface spec. When 9944 * PM invokes this method, it quiesces the device by stopping the driver's 9945 * worker thread for the device, turning off the device's interrupt and DMA, 9946 * and bringing the device offline.
Note that, as the driver implements only the 9947 * minimum PM requirements for a power-aware driver's suspend/resume 9948 * support -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 9949 * to the suspend() method call are treated as SUSPEND and the driver 9950 * fully reinitializes its device during the resume() method call -- the 9951 * driver will set the device to PCI_D3hot state in PCI config space 9952 * instead of setting it according to the @msg provided by the PM. 9953 * 9954 * Return code 9955 * 0 - driver suspended the device 9956 * Error otherwise 9957 **/ 9958 static int 9959 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg) 9960 { 9961 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9962 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9963 9964 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9965 "0473 PCI device Power Management suspend.\n"); 9966 9967 /* Bring down the device */ 9968 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 9969 lpfc_offline(phba); 9970 kthread_stop(phba->worker_thread); 9971 9972 /* Disable interrupt from device */ 9973 lpfc_sli_disable_intr(phba); 9974 9975 /* Save device state to PCI config space */ 9976 pci_save_state(pdev); 9977 pci_set_power_state(pdev, PCI_D3hot); 9978 9979 return 0; 9980 } 9981 9982 /** 9983 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt 9984 * @pdev: pointer to PCI device 9985 * 9986 * This routine is to be called from the kernel's PCI subsystem to support 9987 * system Power Management (PM) to device with SLI-3 interface spec. When PM 9988 * invokes this method, it restores the device's PCI config space state and 9989 * fully reinitializes the device and brings it online. Note that, as the 9990 * driver implements only the minimum PM requirements for a power-aware 9991 * driver's suspend/resume support -- all the possible PM messages (SUSPEND, 9992 * HIBERNATE, FREEZE) to the suspend() method call are treated as SUSPEND 9993 * and the driver fully reinitializes its device during the resume() method 9994 * call -- the device will be set to PCI_D0 directly in PCI config space 9995 * before restoring the state. 9996 * 9997 * Return code 9998 * 0 - driver resumed the device 9999 * Error otherwise 10000 **/ 10001 static int 10002 lpfc_pci_resume_one_s3(struct pci_dev *pdev) 10003 { 10004 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10005 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10006 uint32_t intr_mode; 10007 int error; 10008 10009 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10010 "0452 PCI device Power Management resume.\n"); 10011 10012 /* Restore device state from PCI config space */ 10013 pci_set_power_state(pdev, PCI_D0); 10014 pci_restore_state(pdev); 10015 10016 /* 10017 * As the new kernel behavior of pci_restore_state() API call clears 10018 * the device saved_state flag, need to save the restored state again. 10019 */ 10020 pci_save_state(pdev); 10021 10022 if (pdev->is_busmaster) 10023 pci_set_master(pdev); 10024 10025 /* Startup the kernel thread for this host adapter.
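 * kthread_run() returns an ERR_PTR() on failure rather than NULL,
 * hence the IS_ERR()/PTR_ERR() handling below.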
*/ 10026 phba->worker_thread = kthread_run(lpfc_do_work, phba, 10027 "lpfc_worker_%d", phba->brd_no); 10028 if (IS_ERR(phba->worker_thread)) { 10029 error = PTR_ERR(phba->worker_thread); 10030 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10031 "0434 PM resume failed to start worker " 10032 "thread: error=x%x.\n", error); 10033 return error; 10034 } 10035 10036 /* Configure and enable interrupt */ 10037 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 10038 if (intr_mode == LPFC_INTR_ERROR) { 10039 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10040 "0430 PM resume Failed to enable interrupt\n"); 10041 return -EIO; 10042 } else 10043 phba->intr_mode = intr_mode; 10044 10045 /* Restart HBA and bring it online */ 10046 lpfc_sli_brdrestart(phba); 10047 lpfc_online(phba); 10048 10049 /* Log the current active interrupt mode */ 10050 lpfc_log_intr_mode(phba, phba->intr_mode); 10051 10052 return 0; 10053 } 10054 10055 /** 10056 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover 10057 * @phba: pointer to lpfc hba data structure. 10058 * 10059 * This routine is called to prepare the SLI3 device for PCI slot recover. It 10060 * aborts all the outstanding SCSI I/Os to the pci device. 10061 **/ 10062 static void 10063 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) 10064 { 10065 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10066 "2723 PCI channel I/O abort preparing for recovery\n"); 10067 10068 /* 10069 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 10070 * and let the SCSI mid-layer to retry them to recover. 10071 */ 10072 lpfc_sli_abort_fcp_rings(phba); 10073 } 10074 10075 /** 10076 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset 10077 * @phba: pointer to lpfc hba data structure. 10078 * 10079 * This routine is called to prepare the SLI3 device for PCI slot reset. It 10080 * disables the device interrupt and pci device, and aborts the internal FCP 10081 * pending I/Os. 10082 **/ 10083 static void 10084 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 10085 { 10086 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10087 "2710 PCI channel disable preparing for reset\n"); 10088 10089 /* Block any management I/Os to the device */ 10090 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 10091 10092 /* Block all SCSI devices' I/Os on the host */ 10093 lpfc_scsi_dev_block(phba); 10094 10095 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 10096 lpfc_sli_flush_fcp_rings(phba); 10097 10098 /* stop all timers */ 10099 lpfc_stop_hba_timers(phba); 10100 10101 /* Disable interrupt and pci device */ 10102 lpfc_sli_disable_intr(phba); 10103 pci_disable_device(phba->pcidev); 10104 } 10105 10106 /** 10107 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 10108 * @phba: pointer to lpfc hba data structure. 10109 * 10110 * This routine is called to prepare the SLI3 device for PCI slot permanently 10111 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 10112 * pending I/Os. 
10113 **/ 10114 static void 10115 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 10116 { 10117 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10118 "2711 PCI channel permanent disable for failure\n"); 10119 /* Block all SCSI devices' I/Os on the host */ 10120 lpfc_scsi_dev_block(phba); 10121 10122 /* stop all timers */ 10123 lpfc_stop_hba_timers(phba); 10124 10125 /* Clean up all driver's outstanding SCSI I/Os */ 10126 lpfc_sli_flush_fcp_rings(phba); 10127 } 10128 10129 /** 10130 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 10131 * @pdev: pointer to PCI device. 10132 * @state: the current PCI connection state. 10133 * 10134 * This routine is called from the PCI subsystem for I/O error handling to 10135 * device with SLI-3 interface spec. This function is called by the PCI 10136 * subsystem after a PCI bus error affecting this device has been detected. 10137 * When this function is invoked, it will need to stop all the I/Os and 10138 * interrupt(s) to the device. Once that is done, it will return 10139 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 10140 * as desired. 10141 * 10142 * Return codes 10143 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 10144 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 10145 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 10146 **/ 10147 static pci_ers_result_t 10148 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 10149 { 10150 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10151 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10152 10153 switch (state) { 10154 case pci_channel_io_normal: 10155 /* Non-fatal error, prepare for recovery */ 10156 lpfc_sli_prep_dev_for_recover(phba); 10157 return PCI_ERS_RESULT_CAN_RECOVER; 10158 case pci_channel_io_frozen: 10159 /* Fatal error, prepare for slot reset */ 10160 lpfc_sli_prep_dev_for_reset(phba); 10161 return PCI_ERS_RESULT_NEED_RESET; 10162 case pci_channel_io_perm_failure: 10163 /* Permanent failure, prepare for device down */ 10164 lpfc_sli_prep_dev_for_perm_failure(phba); 10165 return PCI_ERS_RESULT_DISCONNECT; 10166 default: 10167 /* Unknown state, prepare and request slot reset */ 10168 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10169 "0472 Unknown PCI error state: x%x\n", state); 10170 lpfc_sli_prep_dev_for_reset(phba); 10171 return PCI_ERS_RESULT_NEED_RESET; 10172 } 10173 } 10174 10175 /** 10176 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 10177 * @pdev: pointer to PCI device. 10178 * 10179 * This routine is called from the PCI subsystem for error handling to 10180 * device with SLI-3 interface spec. This is called after PCI bus has been 10181 * reset to restart the PCI card from scratch, as if from a cold-boot. 10182 * During the PCI subsystem error recovery, after driver returns 10183 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 10184 * recovery and then call this routine before calling the .resume method 10185 * to recover the device. This function will initialize the HBA device, 10186 * enable the interrupt, but it will just put the HBA to offline state 10187 * without passing any I/O traffic. 
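 *
 * For orientation, in the overall PCI error recovery flow the handlers
 * are invoked in roughly this order (sketch):
 *
 *	.error_detected()  returns PCI_ERS_RESULT_NEED_RESET
 *	.slot_reset()      re-inits the HBA, returns PCI_ERS_RESULT_RECOVERED
 *	.resume()          brings the port back online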
10188 * 10189 * Return codes 10190 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 10191 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 10192 */ 10193 static pci_ers_result_t 10194 lpfc_io_slot_reset_s3(struct pci_dev *pdev) 10195 { 10196 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10197 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10198 struct lpfc_sli *psli = &phba->sli; 10199 uint32_t intr_mode; 10200 10201 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 10202 if (pci_enable_device_mem(pdev)) { 10203 printk(KERN_ERR "lpfc: Cannot re-enable " 10204 "PCI device after reset.\n"); 10205 return PCI_ERS_RESULT_DISCONNECT; 10206 } 10207 10208 pci_restore_state(pdev); 10209 10210 /* 10211 * As the new kernel behavior of pci_restore_state() API call clears 10212 * device saved_state flag, need to save the restored state again. 10213 */ 10214 pci_save_state(pdev); 10215 10216 if (pdev->is_busmaster) 10217 pci_set_master(pdev); 10218 10219 spin_lock_irq(&phba->hbalock); 10220 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 10221 spin_unlock_irq(&phba->hbalock); 10222 10223 /* Configure and enable interrupt */ 10224 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 10225 if (intr_mode == LPFC_INTR_ERROR) { 10226 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10227 "0427 Cannot re-enable interrupt after " 10228 "slot reset.\n"); 10229 return PCI_ERS_RESULT_DISCONNECT; 10230 } else 10231 phba->intr_mode = intr_mode; 10232 10233 /* Take device offline, it will perform cleanup */ 10234 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 10235 lpfc_offline(phba); 10236 lpfc_sli_brdrestart(phba); 10237 10238 /* Log the current active interrupt mode */ 10239 lpfc_log_intr_mode(phba, phba->intr_mode); 10240 10241 return PCI_ERS_RESULT_RECOVERED; 10242 } 10243 10244 /** 10245 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 10246 * @pdev: pointer to PCI device 10247 * 10248 * This routine is called from the PCI subsystem for error handling to device 10249 * with SLI-3 interface spec. It is called when kernel error recovery tells 10250 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 10251 * error recovery. After this call, traffic can start to flow from this device 10252 * again. 10253 */ 10254 static void 10255 lpfc_io_resume_s3(struct pci_dev *pdev) 10256 { 10257 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10258 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10259 10260 /* Bring device online, it will be no-op for non-fatal error resume */ 10261 lpfc_online(phba); 10262 10263 /* Clean up Advanced Error Reporting (AER) if needed */ 10264 if (phba->hba_flag & HBA_AER_ENABLED) 10265 pci_cleanup_aer_uncorrect_error_status(pdev); 10266 } 10267 10268 /** 10269 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 10270 * @phba: pointer to lpfc hba data structure. 
10271 * 10272 * Returns the number of ELS/CT IOCBs to reserve 10273 **/ 10274 int 10275 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 10276 { 10277 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 10278 10279 if (phba->sli_rev == LPFC_SLI_REV4) { 10280 if (max_xri <= 100) 10281 return 10; 10282 else if (max_xri <= 256) 10283 return 25; 10284 else if (max_xri <= 512) 10285 return 50; 10286 else if (max_xri <= 1024) 10287 return 100; 10288 else if (max_xri <= 1536) 10289 return 150; 10290 else if (max_xri <= 2048) 10291 return 200; 10292 else 10293 return 250; 10294 } else 10295 return 0; 10296 } 10297 10298 /** 10299 * lpfc_write_firmware - attempt to write a firmware image to the port 10300 * @fw: pointer to firmware image returned from request_firmware. 10301 * @context: pointer to the lpfc hba data structure, passed as a void *. 10302 * 10303 **/ 10304 static void 10305 lpfc_write_firmware(const struct firmware *fw, void *context) 10306 { 10307 struct lpfc_hba *phba = (struct lpfc_hba *)context; 10308 char fwrev[FW_REV_STR_SIZE]; 10309 struct lpfc_grp_hdr *image; 10310 struct list_head dma_buffer_list; 10311 int i, rc = 0; 10312 struct lpfc_dmabuf *dmabuf, *next; 10313 uint32_t offset = 0, temp_offset = 0; 10314 uint32_t magic_number, ftype, fid, fsize; 10315 10316 /* It can be NULL in no-wait mode, sanity check */ 10317 if (!fw) { 10318 rc = -ENXIO; 10319 goto out; 10320 } 10321 image = (struct lpfc_grp_hdr *)fw->data; 10322 10323 magic_number = be32_to_cpu(image->magic_number); 10324 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image); 10325 fid = bf_get_be32(lpfc_grp_hdr_id, image); 10326 fsize = be32_to_cpu(image->size); 10327 10328 INIT_LIST_HEAD(&dma_buffer_list); 10329 if ((magic_number != LPFC_GROUP_OJECT_MAGIC_G5 && 10330 magic_number != LPFC_GROUP_OJECT_MAGIC_G6) || 10331 ftype != LPFC_FILE_TYPE_GROUP || fsize != fw->size) { 10332 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10333 "3022 Invalid FW image found. 
" 10334 "Magic:%x Type:%x ID:%x Size %d %zd\n", 10335 magic_number, ftype, fid, fsize, fw->size); 10336 rc = -EINVAL; 10337 goto release_out; 10338 } 10339 lpfc_decode_firmware_rev(phba, fwrev, 1); 10340 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { 10341 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10342 "3023 Updating Firmware, Current Version:%s " 10343 "New Version:%s\n", 10344 fwrev, image->revision); 10345 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { 10346 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 10347 GFP_KERNEL); 10348 if (!dmabuf) { 10349 rc = -ENOMEM; 10350 goto release_out; 10351 } 10352 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 10353 SLI4_PAGE_SIZE, 10354 &dmabuf->phys, 10355 GFP_KERNEL); 10356 if (!dmabuf->virt) { 10357 kfree(dmabuf); 10358 rc = -ENOMEM; 10359 goto release_out; 10360 } 10361 list_add_tail(&dmabuf->list, &dma_buffer_list); 10362 } 10363 while (offset < fw->size) { 10364 temp_offset = offset; 10365 list_for_each_entry(dmabuf, &dma_buffer_list, list) { 10366 if (temp_offset + SLI4_PAGE_SIZE > fw->size) { 10367 memcpy(dmabuf->virt, 10368 fw->data + temp_offset, 10369 fw->size - temp_offset); 10370 temp_offset = fw->size; 10371 break; 10372 } 10373 memcpy(dmabuf->virt, fw->data + temp_offset, 10374 SLI4_PAGE_SIZE); 10375 temp_offset += SLI4_PAGE_SIZE; 10376 } 10377 rc = lpfc_wr_object(phba, &dma_buffer_list, 10378 (fw->size - offset), &offset); 10379 if (rc) 10380 goto release_out; 10381 } 10382 rc = offset; 10383 } 10384 10385 release_out: 10386 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { 10387 list_del(&dmabuf->list); 10388 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, 10389 dmabuf->virt, dmabuf->phys); 10390 kfree(dmabuf); 10391 } 10392 release_firmware(fw); 10393 out: 10394 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10395 "3024 Firmware update done: %d.\n", rc); 10396 return; 10397 } 10398 10399 /** 10400 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade 10401 * @phba: pointer to lpfc hba data structure. 10402 * 10403 * This routine is called to perform Linux generic firmware upgrade on device 10404 * that supports such feature. 10405 **/ 10406 int 10407 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade) 10408 { 10409 uint8_t file_name[ELX_MODEL_NAME_SIZE]; 10410 int ret; 10411 const struct firmware *fw; 10412 10413 /* Only supported on SLI4 interface type 2 for now */ 10414 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 10415 LPFC_SLI_INTF_IF_TYPE_2) 10416 return -EPERM; 10417 10418 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName); 10419 10420 if (fw_upgrade == INT_FW_UPGRADE) { 10421 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, 10422 file_name, &phba->pcidev->dev, 10423 GFP_KERNEL, (void *)phba, 10424 lpfc_write_firmware); 10425 } else if (fw_upgrade == RUN_FW_UPGRADE) { 10426 ret = request_firmware(&fw, file_name, &phba->pcidev->dev); 10427 if (!ret) 10428 lpfc_write_firmware(fw, (void *)phba); 10429 } else { 10430 ret = -EINVAL; 10431 } 10432 10433 return ret; 10434 } 10435 10436 /** 10437 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 10438 * @pdev: pointer to PCI device 10439 * @pid: pointer to PCI device identifier 10440 * 10441 * This routine is called from the kernel's PCI subsystem to device with 10442 * SLI-4 interface spec. 
When an Emulex HBA with SLI-4 interface spec is 10443 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 10444 * information of the device and driver to see if the driver state that it 10445 * can support this kind of device. If the match is successful, the driver 10446 * core invokes this routine. If this routine determines it can claim the HBA, 10447 * it does all the initialization that it needs to do to handle the HBA 10448 * properly. 10449 * 10450 * Return code 10451 * 0 - driver can claim the device 10452 * negative value - driver can not claim the device 10453 **/ 10454 static int 10455 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 10456 { 10457 struct lpfc_hba *phba; 10458 struct lpfc_vport *vport = NULL; 10459 struct Scsi_Host *shost = NULL; 10460 int error; 10461 uint32_t cfg_mode, intr_mode; 10462 int adjusted_fcp_io_channel; 10463 10464 /* Allocate memory for HBA structure */ 10465 phba = lpfc_hba_alloc(pdev); 10466 if (!phba) 10467 return -ENOMEM; 10468 10469 /* Perform generic PCI device enabling operation */ 10470 error = lpfc_enable_pci_dev(phba); 10471 if (error) 10472 goto out_free_phba; 10473 10474 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 10475 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 10476 if (error) 10477 goto out_disable_pci_dev; 10478 10479 /* Set up SLI-4 specific device PCI memory space */ 10480 error = lpfc_sli4_pci_mem_setup(phba); 10481 if (error) { 10482 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10483 "1410 Failed to set up pci memory space.\n"); 10484 goto out_disable_pci_dev; 10485 } 10486 10487 /* Set up phase-1 common device driver resources */ 10488 error = lpfc_setup_driver_resource_phase1(phba); 10489 if (error) { 10490 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10491 "1411 Failed to set up driver resource.\n"); 10492 goto out_unset_pci_mem_s4; 10493 } 10494 10495 /* Set up SLI-4 Specific device driver resources */ 10496 error = lpfc_sli4_driver_resource_setup(phba); 10497 if (error) { 10498 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10499 "1412 Failed to set up driver resource.\n"); 10500 goto out_unset_pci_mem_s4; 10501 } 10502 10503 /* Initialize and populate the iocb list per host */ 10504 10505 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10506 "2821 initialize iocb list %d.\n", 10507 phba->cfg_iocb_cnt*1024); 10508 error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024); 10509 10510 if (error) { 10511 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10512 "1413 Failed to initialize iocb list.\n"); 10513 goto out_unset_driver_resource_s4; 10514 } 10515 10516 INIT_LIST_HEAD(&phba->active_rrq_list); 10517 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); 10518 10519 /* Set up common device driver resources */ 10520 error = lpfc_setup_driver_resource_phase2(phba); 10521 if (error) { 10522 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10523 "1414 Failed to set up driver resource.\n"); 10524 goto out_free_iocb_list; 10525 } 10526 10527 /* Get the default values for Model Name and Description */ 10528 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 10529 10530 /* Create SCSI host to the physical port */ 10531 error = lpfc_create_shost(phba); 10532 if (error) { 10533 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10534 "1415 Failed to create scsi host.\n"); 10535 goto out_unset_driver_resource; 10536 } 10537 10538 /* Configure sysfs attributes */ 10539 vport = phba->pport; 10540 error = lpfc_alloc_sysfs_attr(vport); 10541 if (error) { 10542 lpfc_printf_log(phba, 
KERN_ERR, LOG_INIT, 10543 "1416 Failed to allocate sysfs attr\n"); 10544 goto out_destroy_shost; 10545 } 10546 10547 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 10548 /* Now, trying to enable interrupt and bring up the device */ 10549 cfg_mode = phba->cfg_use_msi; 10550 10551 /* Put device to a known state before enabling interrupt */ 10552 lpfc_stop_port(phba); 10553 /* Configure and enable interrupt */ 10554 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 10555 if (intr_mode == LPFC_INTR_ERROR) { 10556 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10557 "0426 Failed to enable interrupt.\n"); 10558 error = -ENODEV; 10559 goto out_free_sysfs_attr; 10560 } 10561 /* Default to single EQ for non-MSI-X */ 10562 if (phba->intr_type != MSIX) 10563 adjusted_fcp_io_channel = 1; 10564 else 10565 adjusted_fcp_io_channel = phba->cfg_fcp_io_channel; 10566 phba->cfg_fcp_io_channel = adjusted_fcp_io_channel; 10567 /* Set up SLI-4 HBA */ 10568 if (lpfc_sli4_hba_setup(phba)) { 10569 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10570 "1421 Failed to set up hba\n"); 10571 error = -ENODEV; 10572 goto out_disable_intr; 10573 } 10574 10575 /* Log the current active interrupt mode */ 10576 phba->intr_mode = intr_mode; 10577 lpfc_log_intr_mode(phba, intr_mode); 10578 10579 /* Perform post initialization setup */ 10580 lpfc_post_init_setup(phba); 10581 10582 /* check for firmware upgrade or downgrade */ 10583 if (phba->cfg_request_firmware_upgrade) 10584 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE); 10585 10586 /* Check if there are static vports to be created. */ 10587 lpfc_create_static_vport(phba); 10588 return 0; 10589 10590 out_disable_intr: 10591 lpfc_sli4_disable_intr(phba); 10592 out_free_sysfs_attr: 10593 lpfc_free_sysfs_attr(vport); 10594 out_destroy_shost: 10595 lpfc_destroy_shost(phba); 10596 out_unset_driver_resource: 10597 lpfc_unset_driver_resource_phase2(phba); 10598 out_free_iocb_list: 10599 lpfc_free_iocb_list(phba); 10600 out_unset_driver_resource_s4: 10601 lpfc_sli4_driver_resource_unset(phba); 10602 out_unset_pci_mem_s4: 10603 lpfc_sli4_pci_mem_unset(phba); 10604 out_disable_pci_dev: 10605 lpfc_disable_pci_dev(phba); 10606 if (shost) 10607 scsi_host_put(shost); 10608 out_free_phba: 10609 lpfc_hba_free(phba); 10610 return error; 10611 } 10612 10613 /** 10614 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 10615 * @pdev: pointer to PCI device 10616 * 10617 * This routine is called from the kernel's PCI subsystem to device with 10618 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 10619 * removed from PCI bus, it performs all the necessary cleanup for the HBA 10620 * device to be removed from the PCI subsystem properly. 

/**
 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to unregister a
 * device with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface
 * spec is removed from the PCI bus, it performs all the necessary cleanup
 * for the HBA device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	/* Mark the device unloading flag */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	/* Free the HBA sysfs attributes */
	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Perform cleanup on the physical port */
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA FCoE function.
	 */
	lpfc_debugfs_terminate(vport);
	lpfc_sli4_hba_unset(phba);

	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	/* Perform scsi free before driver resource_unset since scsi
	 * buffers are released to their corresponding pools here.
	 */
	lpfc_scsi_free(phba);

	lpfc_sli4_driver_resource_unset(phba);

	/* Unmap adapter Control and Doorbell registers */
	lpfc_sli4_pci_mem_unset(phba);

	/* Release PCI resources and disable device's PCI function */
	scsi_host_put(shost);
	lpfc_disable_pci_dev(phba);

	/* Finally, free the driver's device data structure */
	lpfc_hba_free(phba);
}

/**
 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) on a device with SLI-4 interface spec. When PM
 * invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off the device's interrupt and DMA,
 * and bringing the device offline. Note that the driver implements only the
 * minimum PM requirements for a power-aware driver: all possible PM messages
 * (SUSPEND, HIBERNATE, FREEZE) to the suspend() method call are treated as
 * SUSPEND, and the driver fully reinitializes its device during the resume()
 * method call. Consequently, the driver sets the device to the PCI_D3hot
 * state in PCI config space instead of setting it according to the @msg
 * provided by the PM.
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int
lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
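
/*
 * Sketch of the legacy PCI PM contract the suspend/resume pair around this
 * comment follows (illustrative only; the example_* names below are
 * invented):
 *
 *	static int example_suspend(struct pci_dev *pdev, pm_message_t msg)
 *	{
 *		example_quiesce_device();	// stop DMA and interrupts first
 *		pci_save_state(pdev);		// then save config space
 *		pci_set_power_state(pdev, PCI_D3hot);
 *		return 0;
 *	}
 *
 *	static int example_resume(struct pci_dev *pdev)
 *	{
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);	// must precede re-enabling DMA
 *		pci_save_state(pdev);		// restore clears saved_state
 *		return example_reinit_device();
 *	}
 *
 * pci_save_state()/pci_restore_state()/pci_set_power_state() are real PCI
 * core helpers; the extra pci_save_state() in resume mirrors the workaround
 * in lpfc_pci_resume_one_s4() below for pci_restore_state() clearing the
 * device's saved_state flag.
 */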

/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) on a device with SLI-4 interface spec. When PM
 * invokes this method, it restores the device's PCI config space state,
 * fully reinitializes the device, and brings it online. Note that the driver
 * implements only the minimum PM requirements for a power-aware driver: all
 * possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() method
 * call are treated as SUSPEND, and the driver fully reinitializes its device
 * during the resume() method call. Consequently, the device is set to PCI_D0
 * directly in PCI config space before restoring the state.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}

/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recovery.
 * It aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2828 PCI channel I/O abort preparing for recovery\n");
	/*
	 * There may be errored I/Os through the HBA; abort all I/Os on the
	 * txcmplq and let the SCSI mid-layer retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}

/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2826 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_fcp_rings(phba);

	/* Stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);
	pci_disable_device(phba->pcidev);
}

/**
 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for permanently
 * disabling the PCI slot. It blocks the SCSI transport layer traffic and
 * flushes the FCP pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2827 PCI channel permanent disable for failure\n");

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_fcp_rings(phba);
}

/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling on a
 * device with SLI-4 interface spec. It is invoked by the PCI subsystem
 * after a PCI bus error affecting this device has been detected. When this
 * function is invoked, it stops all the I/Os and interrupt(s) to the device.
 * Depending on @state, it then returns the appropriate PCI_ERS_RESULT_*
 * code, typically PCI_ERS_RESULT_NEED_RESET, so the PCI subsystem can
 * perform proper recovery as desired.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli4_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli4_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2825 Unknown PCI error state: x%x\n", state);
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}
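
/*
 * Illustrative mapping (not driver code): the PCI error-recovery core drives
 * the three SLI-4 handlers in this file as a simple state machine. Roughly:
 *
 *	error_detected(io_normal)       -> CAN_RECOVER  (recover I/O in place)
 *	error_detected(io_frozen)       -> NEED_RESET   -> slot_reset() called
 *	error_detected(io_perm_failure) -> DISCONNECT   (device is gone)
 *	slot_reset() == RECOVERED       -> resume() called
 *
 * A hedged skeleton of the registration, with invented handler names:
 *
 *	static const struct pci_error_handlers example_err_handler = {
 *		.error_detected	= example_io_error_detected,
 *		.slot_reset	= example_io_slot_reset,
 *		.resume		= example_io_resume,
 *	};
 */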

/**
 * lpfc_io_slot_reset_s4 - Method to restart PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling on a
 * device with SLI-4 interface spec. It is called after the PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot. During
 * the PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method to
 * recover the device. This function initializes the HBA device and enables
 * its interrupt, but it just puts the HBA in an offline state without
 * passing any I/O traffic.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
		       "PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2824 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling on a
 * device with SLI-4 interface spec. It is called when the kernel error
 * recovery tells the lpfc driver that it is ok to resume normal PCI
 * operation after PCI bus error recovery. After this call, traffic can
 * start to flow from this device again.
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/*
	 * In case of slot reset, as function reset is performed through
	 * mailbox command which needs DMA to be enabled, this operation
	 * has to be moved to the io resume phase. Taking device offline
	 * will perform the necessary cleanup.
	 */
	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
		/* Perform device reset */
		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		/* Bring the device back online */
		lpfc_online(phba);
	}

	/* Clean up Advanced Error Reporting (AER) if needed */
	if (phba->hba_flag & HBA_AER_ENABLED)
		pci_cleanup_aer_uncorrect_error_status(pdev);
}

/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem
 * looks at the PCI device-specific information of the device to see if the
 * driver states that it can support this kind of device. If the match is
 * successful, the driver core invokes this routine. This routine dispatches
 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
 * do all the initialization that it needs to do to handle the HBA device
 * properly.
 *
 * Return code
 * 	0 - driver can claim the device
 * 	negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	int rc;
	struct lpfc_sli_intf intf;

	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
		return -ENODEV;

	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
		rc = lpfc_pci_probe_one_s4(pdev, pid);
	else
		rc = lpfc_pci_probe_one_s3(pdev, pid);

	return rc;
}

/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from the PCI bus, the driver core invokes this
 * routine. This routine dispatches the action to the proper SLI-3 or SLI-4
 * device remove routine, which will perform all the necessary cleanup for
 * the device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_pci_remove_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_pci_remove_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1424 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
}
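
/*
 * Dispatch pattern note (illustrative sketch, not driver code): only the
 * probe path reads the SLI_INTF register from PCI config space; every later
 * entry point, like lpfc_pci_remove_one() above, switches on
 * phba->pci_dev_grp, which was recorded at probe time by
 * lpfc_api_table_setup(). With invented names, each dispatcher is shaped
 * like:
 *
 *	switch (phba->pci_dev_grp) {
 *	case LPFC_PCI_DEV_LP:			// SLI-3 parts
 *		rc = example_entry_point_s3(pdev);
 *		break;
 *	case LPFC_PCI_DEV_OC:			// SLI-4 parts
 *		rc = example_entry_point_s4(pdev);
 *		break;
 *	default:				// should be unreachable
 *		break;
 *	}
 *
 * The suspend/resume and error-handler dispatchers below all follow the
 * same shape.
 */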

/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
 * suspend the device.
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/
static int
lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_suspend_one_s3(pdev, msg);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_suspend_one_s4(pdev, msg);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1425 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/
static int
lpfc_pci_resume_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after the PCI bus has been reset to restart the PCI
 * card from scratch, as if from a cold-boot. When this routine is invoked,
 * it dispatches the action to the proper SLI-3 or SLI-4 device reset
 * handling routine, which will perform the proper device reset.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1428 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}

/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
}

/**
 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks whether OAS is supported by this adapter. If it is
 * supported, the Flash Optimized Fabric (fof) configuration flag is set.
 * Otherwise, the enable-oas flag is cleared and the pool created for OAS
 * device data is destroyed.
 **/
void
lpfc_sli4_oas_verify(struct lpfc_hba *phba)
{
	if (!phba->cfg_EnableXLane)
		return;

	if (phba->sli4_hba.pc_sli4_params.oas_supported) {
		phba->cfg_fof = 1;
	} else {
		phba->cfg_fof = 0;
		if (phba->device_data_mem_pool)
			mempool_destroy(phba->device_data_mem_pool);
		phba->device_data_mem_pool = NULL;
	}
}

/**
 * lpfc_fof_queue_setup - Set up all the fof queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up all the fof queues for the FC HBA
 * operation.
 *
 * Return codes
 * 	0 - successful
 * 	-ENOMEM - No available memory
 **/
int
lpfc_fof_queue_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	int rc;

	rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
	if (rc)
		return -ENOMEM;

	if (phba->cfg_fof) {
		rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
				    phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
		if (rc)
			goto out_oas_cq;

		rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
				    phba->sli4_hba.oas_cq, LPFC_FCP);
		if (rc)
			goto out_oas_wq;

		phba->sli4_hba.oas_cq->pring = &psli->ring[LPFC_FCP_OAS_RING];
		phba->sli4_hba.oas_ring = &psli->ring[LPFC_FCP_OAS_RING];
	}

	return 0;

out_oas_wq:
	lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
out_oas_cq:
	lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
	return rc;
}

/**
 * lpfc_fof_queue_create - Create all the fof queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate all the fof queues for the FC HBA
 * operation. For each SLI4 queue type, the parameters such as queue entry
 * count (queue depth) shall be taken from the module parameter. For now,
 * we just use some constant number as a placeholder.
 *
 * Return codes
 * 	0 - successful
 * 	-ENOMEM - No available memory
 * 	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_fof_queue_create(struct lpfc_hba *phba)
{
	struct lpfc_queue *qdesc;

	/* Create FOF EQ */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
				      phba->sli4_hba.eq_ecount);
	if (!qdesc)
		goto out_error;

	phba->sli4_hba.fof_eq = qdesc;

	if (phba->cfg_fof) {
		/* Create OAS CQ */
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
					      phba->sli4_hba.cq_ecount);
		if (!qdesc)
			goto out_error;

		phba->sli4_hba.oas_cq = qdesc;

		/* Create OAS WQ */
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
					      phba->sli4_hba.wq_ecount);
		if (!qdesc)
			goto out_error;

		phba->sli4_hba.oas_wq = qdesc;
	}
	return 0;

out_error:
	lpfc_fof_queue_destroy(phba);
	return -ENOMEM;
}
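
/*
 * Sketch of the fof queue lifecycle (illustrative ordering, derived from the
 * routines above and below; not a callable sequence on its own):
 *
 *	lpfc_fof_queue_create(phba);	// allocate EQ (+ CQ/WQ if cfg_fof)
 *	lpfc_fof_queue_setup(phba);	// post them to the HBA: EQ -> CQ -> WQ
 *	...
 *	lpfc_fof_queue_destroy(phba);	// free the queue memory on teardown
 *
 * The parent/child ordering matters: a CQ must reference an existing EQ and
 * a WQ an existing CQ, which is why setup creates the EQ first and its error
 * path tears down in reverse.
 */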

/**
 * lpfc_fof_queue_destroy - Destroy all the fof queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the fof SLI4 queues used for the
 * FC HBA operation.
 *
 * Return codes
 * 	0 - successful
 **/
int
lpfc_fof_queue_destroy(struct lpfc_hba *phba)
{
	/* Release FOF Event queue */
	if (phba->sli4_hba.fof_eq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
		phba->sli4_hba.fof_eq = NULL;
	}

	/* Release OAS Completion queue */
	if (phba->sli4_hba.oas_cq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
		phba->sli4_hba.oas_cq = NULL;
	}

	/* Release OAS Work queue */
	if (phba->sli4_hba.oas_wq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
		phba->sli4_hba.oas_wq = NULL;
	}
	return 0;
}

MODULE_DEVICE_TABLE(pci, lpfc_id_table);

static const struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= lpfc_pci_remove_one,
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler	= &lpfc_err_handler,
};

static const struct file_operations lpfc_mgmt_fop = {
	.owner = THIS_MODULE,
};

static struct miscdevice lpfc_mgmt_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "lpfcmgmt",
	.fops = &lpfc_mgmt_fop,
};

/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as the lpfc module entry point.
 *
 * Return codes
 * 	0 - successful
 * 	-ENOMEM - FC attach transport failed
 * 	all others - failed
 */
static int __init
lpfc_init(void)
{
	int cpu;
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	error = misc_register(&lpfc_mgmt_dev);
	if (error)
		printk(KERN_ERR "Could not register lpfcmgmt device, "
			"misc_register returned with status %d\n", error);

	lpfc_transport_functions.vport_create = lpfc_vport_create;
	lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		return -ENOMEM;
	lpfc_vport_transport_template =
		fc_attach_transport(&lpfc_vport_transport_functions);
	if (lpfc_vport_transport_template == NULL) {
		fc_release_transport(lpfc_transport_template);
		return -ENOMEM;
	}

	/* Initialize in case vector mapping is needed */
	lpfc_used_cpu = NULL;
	lpfc_present_cpu = 0;
	for_each_present_cpu(cpu)
		lpfc_present_cpu++;

	error = pci_register_driver(&lpfc_driver);
	if (error) {
		fc_release_transport(lpfc_transport_template);
		fc_release_transport(lpfc_vport_transport_template);
	}

	return error;
}
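
/*
 * Sketch (illustrative): lpfc_init() above acquires global resources in a
 * fixed order and lpfc_exit() below releases them, the usual module-lifetime
 * mirror:
 *
 *	init:  misc_register -> fc_attach_transport (phys, then vport)
 *	       -> pci_register_driver
 *	exit:  misc_deregister -> pci_unregister_driver
 *	       -> fc_release_transport (phys, then vport)
 *
 * A pci_register_driver() failure unwinds only the two transport templates;
 * a failure to register the lpfcmgmt misc device is deliberately non-fatal.
 */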
11524 */ 11525 static void __exit 11526 lpfc_exit(void) 11527 { 11528 misc_deregister(&lpfc_mgmt_dev); 11529 pci_unregister_driver(&lpfc_driver); 11530 fc_release_transport(lpfc_transport_template); 11531 fc_release_transport(lpfc_vport_transport_template); 11532 if (_dump_buf_data) { 11533 printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for " 11534 "_dump_buf_data at 0x%p\n", 11535 (1L << _dump_buf_data_order), _dump_buf_data); 11536 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order); 11537 } 11538 11539 if (_dump_buf_dif) { 11540 printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for " 11541 "_dump_buf_dif at 0x%p\n", 11542 (1L << _dump_buf_dif_order), _dump_buf_dif); 11543 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); 11544 } 11545 kfree(lpfc_used_cpu); 11546 idr_destroy(&lpfc_hba_index); 11547 } 11548 11549 module_init(lpfc_init); 11550 module_exit(lpfc_exit); 11551 MODULE_LICENSE("GPL"); 11552 MODULE_DESCRIPTION(LPFC_MODULE_DESC); 11553 MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com"); 11554 MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 11555