1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 8 * * 9 * This program is free software; you can redistribute it and/or * 10 * modify it under the terms of version 2 of the GNU General * 11 * Public License as published by the Free Software Foundation. * 12 * This program is distributed in the hope that it will be useful. * 13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 17 * TO BE LEGALLY INVALID. See the GNU General Public License for * 18 * more details, a copy of which can be found in the file COPYING * 19 * included with this package. * 20 *******************************************************************/ 21 22 #include <linux/blkdev.h> 23 #include <linux/delay.h> 24 #include <linux/dma-mapping.h> 25 #include <linux/idr.h> 26 #include <linux/interrupt.h> 27 #include <linux/module.h> 28 #include <linux/kthread.h> 29 #include <linux/pci.h> 30 #include <linux/spinlock.h> 31 #include <linux/ctype.h> 32 #include <linux/aer.h> 33 #include <linux/slab.h> 34 #include <linux/firmware.h> 35 #include <linux/miscdevice.h> 36 #include <linux/percpu.h> 37 38 #include <scsi/scsi.h> 39 #include <scsi/scsi_device.h> 40 #include <scsi/scsi_host.h> 41 #include <scsi/scsi_transport_fc.h> 42 43 #include "lpfc_hw4.h" 44 #include "lpfc_hw.h" 45 #include "lpfc_sli.h" 46 #include "lpfc_sli4.h" 47 #include "lpfc_nl.h" 48 #include "lpfc_disc.h" 49 #include "lpfc_scsi.h" 50 #include "lpfc.h" 51 #include "lpfc_logmsg.h" 52 #include "lpfc_crtn.h" 53 #include "lpfc_vport.h" 54 #include "lpfc_version.h" 55 #include "lpfc_ids.h" 56 57 char *_dump_buf_data; 58 unsigned long _dump_buf_data_order; 59 char *_dump_buf_dif; 60 unsigned long _dump_buf_dif_order; 61 spinlock_t _dump_buf_lock; 62 63 /* Used when mapping IRQ vectors in a driver centric manner */ 64 uint16_t *lpfc_used_cpu; 65 uint32_t lpfc_present_cpu; 66 67 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 68 static int lpfc_post_rcv_buf(struct lpfc_hba *); 69 static int lpfc_sli4_queue_verify(struct lpfc_hba *); 70 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); 71 static int lpfc_setup_endian_order(struct lpfc_hba *); 72 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); 73 static void lpfc_free_els_sgl_list(struct lpfc_hba *); 74 static void lpfc_init_sgl_list(struct lpfc_hba *); 75 static int lpfc_init_active_sgl_array(struct lpfc_hba *); 76 static void lpfc_free_active_sgl(struct lpfc_hba *); 77 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); 78 static int lpfc_hba_down_post_s4(struct lpfc_hba *phba); 79 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); 80 static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); 81 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); 82 static void lpfc_sli4_disable_intr(struct lpfc_hba *); 83 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t); 84 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba); 85 86 static struct scsi_transport_template *lpfc_transport_template = NULL; 87 static struct 
scsi_transport_template *lpfc_vport_transport_template = NULL; 88 static DEFINE_IDR(lpfc_hba_index); 89 90 /** 91 * lpfc_config_port_prep - Perform lpfc initialization prior to config port 92 * @phba: pointer to lpfc hba data structure. 93 * 94 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT 95 * mailbox command. It retrieves the revision information from the HBA and 96 * collects the Vital Product Data (VPD) about the HBA for preparing the 97 * configuration of the HBA. 98 * 99 * Return codes: 100 * 0 - success. 101 * -ERESTART - requests the SLI layer to reset the HBA and try again. 102 * Any other value - indicates an error. 103 **/ 104 int 105 lpfc_config_port_prep(struct lpfc_hba *phba) 106 { 107 lpfc_vpd_t *vp = &phba->vpd; 108 int i = 0, rc; 109 LPFC_MBOXQ_t *pmb; 110 MAILBOX_t *mb; 111 char *lpfc_vpd_data = NULL; 112 uint16_t offset = 0; 113 static char licensed[56] = 114 "key unlock for use with gnu public licensed code only\0"; 115 static int init_key = 1; 116 117 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 118 if (!pmb) { 119 phba->link_state = LPFC_HBA_ERROR; 120 return -ENOMEM; 121 } 122 123 mb = &pmb->u.mb; 124 phba->link_state = LPFC_INIT_MBX_CMDS; 125 126 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 127 if (init_key) { 128 uint32_t *ptext = (uint32_t *) licensed; 129 130 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++) 131 *ptext = cpu_to_be32(*ptext); 132 init_key = 0; 133 } 134 135 lpfc_read_nv(phba, pmb); 136 memset((char*)mb->un.varRDnvp.rsvd3, 0, 137 sizeof (mb->un.varRDnvp.rsvd3)); 138 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed, 139 sizeof (licensed)); 140 141 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 142 143 if (rc != MBX_SUCCESS) { 144 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 145 "0324 Config Port initialization " 146 "error, mbxCmd x%x READ_NVPARM, " 147 "mbxStatus x%x\n", 148 mb->mbxCommand, mb->mbxStatus); 149 mempool_free(pmb, phba->mbox_mem_pool); 150 return -ERESTART; 151 } 152 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, 153 sizeof(phba->wwnn)); 154 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname, 155 sizeof(phba->wwpn)); 156 } 157 158 phba->sli3_options = 0x0; 159 160 /* Setup and issue mailbox READ REV command */ 161 lpfc_read_rev(phba, pmb); 162 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 163 if (rc != MBX_SUCCESS) { 164 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 165 "0439 Adapter failed to init, mbxCmd x%x " 166 "READ_REV, mbxStatus x%x\n", 167 mb->mbxCommand, mb->mbxStatus); 168 mempool_free( pmb, phba->mbox_mem_pool); 169 return -ERESTART; 170 } 171 172 173 /* 174 * The value of rr must be 1 since the driver set the cv field to 1. 175 * This setting requires the FW to set all revision fields. 
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error; either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's configure asynchronous
 * event mailbox command to the device. If the mailbox command returns
 * successfully, it will set the internal async event support flag to 1;
 * otherwise, it will set the internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command for getting
 * wake up parameters. When this command completes, the response contains the
 * Option ROM version of the HBA. This function translates the version number
 * into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *	None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
			sizeof(struct lpfc_name));

	if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
			sizeof(struct lpfc_name));
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *	0 - success.
 *	Any other value - error.
371 **/ 372 int 373 lpfc_config_port_post(struct lpfc_hba *phba) 374 { 375 struct lpfc_vport *vport = phba->pport; 376 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 377 LPFC_MBOXQ_t *pmb; 378 MAILBOX_t *mb; 379 struct lpfc_dmabuf *mp; 380 struct lpfc_sli *psli = &phba->sli; 381 uint32_t status, timeout; 382 int i, j; 383 int rc; 384 385 spin_lock_irq(&phba->hbalock); 386 /* 387 * If the Config port completed correctly the HBA is not 388 * over heated any more. 389 */ 390 if (phba->over_temp_state == HBA_OVER_TEMP) 391 phba->over_temp_state = HBA_NORMAL_TEMP; 392 spin_unlock_irq(&phba->hbalock); 393 394 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 395 if (!pmb) { 396 phba->link_state = LPFC_HBA_ERROR; 397 return -ENOMEM; 398 } 399 mb = &pmb->u.mb; 400 401 /* Get login parameters for NID. */ 402 rc = lpfc_read_sparam(phba, pmb, 0); 403 if (rc) { 404 mempool_free(pmb, phba->mbox_mem_pool); 405 return -ENOMEM; 406 } 407 408 pmb->vport = vport; 409 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 410 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 411 "0448 Adapter failed init, mbxCmd x%x " 412 "READ_SPARM mbxStatus x%x\n", 413 mb->mbxCommand, mb->mbxStatus); 414 phba->link_state = LPFC_HBA_ERROR; 415 mp = (struct lpfc_dmabuf *) pmb->context1; 416 mempool_free(pmb, phba->mbox_mem_pool); 417 lpfc_mbuf_free(phba, mp->virt, mp->phys); 418 kfree(mp); 419 return -EIO; 420 } 421 422 mp = (struct lpfc_dmabuf *) pmb->context1; 423 424 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); 425 lpfc_mbuf_free(phba, mp->virt, mp->phys); 426 kfree(mp); 427 pmb->context1 = NULL; 428 lpfc_update_vport_wwn(vport); 429 430 /* Update the fc_host data structures with new wwn. */ 431 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 432 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 433 fc_host_max_npiv_vports(shost) = phba->max_vpi; 434 435 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 436 /* This should be consolidated into parse_vpd ? 
- mr */ 437 if (phba->SerialNumber[0] == 0) { 438 uint8_t *outptr; 439 440 outptr = &vport->fc_nodename.u.s.IEEE[0]; 441 for (i = 0; i < 12; i++) { 442 status = *outptr++; 443 j = ((status & 0xf0) >> 4); 444 if (j <= 9) 445 phba->SerialNumber[i] = 446 (char)((uint8_t) 0x30 + (uint8_t) j); 447 else 448 phba->SerialNumber[i] = 449 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 450 i++; 451 j = (status & 0xf); 452 if (j <= 9) 453 phba->SerialNumber[i] = 454 (char)((uint8_t) 0x30 + (uint8_t) j); 455 else 456 phba->SerialNumber[i] = 457 (char)((uint8_t) 0x61 + (uint8_t) (j - 10)); 458 } 459 } 460 461 lpfc_read_config(phba, pmb); 462 pmb->vport = vport; 463 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 464 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 465 "0453 Adapter failed to init, mbxCmd x%x " 466 "READ_CONFIG, mbxStatus x%x\n", 467 mb->mbxCommand, mb->mbxStatus); 468 phba->link_state = LPFC_HBA_ERROR; 469 mempool_free( pmb, phba->mbox_mem_pool); 470 return -EIO; 471 } 472 473 /* Check if the port is disabled */ 474 lpfc_sli_read_link_ste(phba); 475 476 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 477 i = (mb->un.varRdConfig.max_xri + 1); 478 if (phba->cfg_hba_queue_depth > i) { 479 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 480 "3359 HBA queue depth changed from %d to %d\n", 481 phba->cfg_hba_queue_depth, i); 482 phba->cfg_hba_queue_depth = i; 483 } 484 485 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ 486 i = (mb->un.varRdConfig.max_xri >> 3); 487 if (phba->pport->cfg_lun_queue_depth > i) { 488 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 489 "3360 LUN queue depth changed from %d to %d\n", 490 phba->pport->cfg_lun_queue_depth, i); 491 phba->pport->cfg_lun_queue_depth = i; 492 } 493 494 phba->lmt = mb->un.varRdConfig.lmt; 495 496 /* Get the default values for Model Name and Description */ 497 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 498 499 phba->link_state = LPFC_LINK_DOWN; 500 501 /* Only process IOCBs on ELS ring till hba_state is READY */ 502 if (psli->ring[psli->extra_ring].sli.sli3.cmdringaddr) 503 psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT; 504 if (psli->ring[psli->fcp_ring].sli.sli3.cmdringaddr) 505 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT; 506 if (psli->ring[psli->next_ring].sli.sli3.cmdringaddr) 507 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT; 508 509 /* Post receive buffers for desired rings */ 510 if (phba->sli_rev != 3) 511 lpfc_post_rcv_buf(phba); 512 513 /* 514 * Configure HBA MSI-X attention conditions to messages if MSI-X mode 515 */ 516 if (phba->intr_type == MSIX) { 517 rc = lpfc_config_msi(phba, pmb); 518 if (rc) { 519 mempool_free(pmb, phba->mbox_mem_pool); 520 return -EIO; 521 } 522 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 523 if (rc != MBX_SUCCESS) { 524 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 525 "0352 Config MSI mailbox command " 526 "failed, mbxCmd x%x, mbxStatus x%x\n", 527 pmb->u.mb.mbxCommand, 528 pmb->u.mb.mbxStatus); 529 mempool_free(pmb, phba->mbox_mem_pool); 530 return -EIO; 531 } 532 } 533 534 spin_lock_irq(&phba->hbalock); 535 /* Initialize ERATT handling flag */ 536 phba->hba_flag &= ~HBA_ERATT_HANDLED; 537 538 /* Enable appropriate host interrupts */ 539 if (lpfc_readl(phba->HCregaddr, &status)) { 540 spin_unlock_irq(&phba->hbalock); 541 return -EIO; 542 } 543 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 544 if (psli->num_rings > 0) 545 status |= HC_R0INT_ENA; 546 if (psli->num_rings > 1) 547 status |= HC_R1INT_ENA; 548 if (psli->num_rings > 2) 549 
status |= HC_R2INT_ENA; 550 if (psli->num_rings > 3) 551 status |= HC_R3INT_ENA; 552 553 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) && 554 (phba->cfg_poll & DISABLE_FCP_RING_INT)) 555 status &= ~(HC_R0INT_ENA); 556 557 writel(status, phba->HCregaddr); 558 readl(phba->HCregaddr); /* flush */ 559 spin_unlock_irq(&phba->hbalock); 560 561 /* Set up ring-0 (ELS) timer */ 562 timeout = phba->fc_ratov * 2; 563 mod_timer(&vport->els_tmofunc, 564 jiffies + msecs_to_jiffies(1000 * timeout)); 565 /* Set up heart beat (HB) timer */ 566 mod_timer(&phba->hb_tmofunc, 567 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 568 phba->hb_outstanding = 0; 569 phba->last_completion_time = jiffies; 570 /* Set up error attention (ERATT) polling timer */ 571 mod_timer(&phba->eratt_poll, 572 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 573 574 if (phba->hba_flag & LINK_DISABLED) { 575 lpfc_printf_log(phba, 576 KERN_ERR, LOG_INIT, 577 "2598 Adapter Link is disabled.\n"); 578 lpfc_down_link(phba, pmb); 579 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 580 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 581 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 582 lpfc_printf_log(phba, 583 KERN_ERR, LOG_INIT, 584 "2599 Adapter failed to issue DOWN_LINK" 585 " mbox command rc 0x%x\n", rc); 586 587 mempool_free(pmb, phba->mbox_mem_pool); 588 return -EIO; 589 } 590 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 591 mempool_free(pmb, phba->mbox_mem_pool); 592 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 593 if (rc) 594 return rc; 595 } 596 /* MBOX buffer will be freed in mbox compl */ 597 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 598 if (!pmb) { 599 phba->link_state = LPFC_HBA_ERROR; 600 return -ENOMEM; 601 } 602 603 lpfc_config_async(phba, pmb, LPFC_ELS_RING); 604 pmb->mbox_cmpl = lpfc_config_async_cmpl; 605 pmb->vport = phba->pport; 606 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 607 608 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 609 lpfc_printf_log(phba, 610 KERN_ERR, 611 LOG_INIT, 612 "0456 Adapter failed to issue " 613 "ASYNCEVT_ENABLE mbox status x%x\n", 614 rc); 615 mempool_free(pmb, phba->mbox_mem_pool); 616 } 617 618 /* Get Option rom version */ 619 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 620 if (!pmb) { 621 phba->link_state = LPFC_HBA_ERROR; 622 return -ENOMEM; 623 } 624 625 lpfc_dump_wakeup_param(phba, pmb); 626 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl; 627 pmb->vport = phba->pport; 628 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 629 630 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 631 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed " 632 "to get Option ROM version status x%x\n", rc); 633 mempool_free(pmb, phba->mbox_mem_pool); 634 } 635 636 return 0; 637 } 638 639 /** 640 * lpfc_hba_init_link - Initialize the FC link 641 * @phba: pointer to lpfc hba data structure. 642 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 643 * 644 * This routine will issue the INIT_LINK mailbox command call. 645 * It is available to other drivers through the lpfc_hba data 646 * structure for use as a delayed link up mechanism with the 647 * module parameter lpfc_suppress_link_up. 
648 * 649 * Return code 650 * 0 - success 651 * Any other value - error 652 **/ 653 static int 654 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) 655 { 656 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag); 657 } 658 659 /** 660 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology 661 * @phba: pointer to lpfc hba data structure. 662 * @fc_topology: desired fc topology. 663 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 664 * 665 * This routine will issue the INIT_LINK mailbox command call. 666 * It is available to other drivers through the lpfc_hba data 667 * structure for use as a delayed link up mechanism with the 668 * module parameter lpfc_suppress_link_up. 669 * 670 * Return code 671 * 0 - success 672 * Any other value - error 673 **/ 674 int 675 lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology, 676 uint32_t flag) 677 { 678 struct lpfc_vport *vport = phba->pport; 679 LPFC_MBOXQ_t *pmb; 680 MAILBOX_t *mb; 681 int rc; 682 683 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 684 if (!pmb) { 685 phba->link_state = LPFC_HBA_ERROR; 686 return -ENOMEM; 687 } 688 mb = &pmb->u.mb; 689 pmb->vport = vport; 690 691 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) || 692 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) && 693 !(phba->lmt & LMT_1Gb)) || 694 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) && 695 !(phba->lmt & LMT_2Gb)) || 696 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) && 697 !(phba->lmt & LMT_4Gb)) || 698 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) && 699 !(phba->lmt & LMT_8Gb)) || 700 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) && 701 !(phba->lmt & LMT_10Gb)) || 702 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) && 703 !(phba->lmt & LMT_16Gb)) || 704 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) && 705 !(phba->lmt & LMT_32Gb))) { 706 /* Reset link speed to auto */ 707 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 708 "1302 Invalid speed for this board:%d " 709 "Reset link speed to auto.\n", 710 phba->cfg_link_speed); 711 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; 712 } 713 lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed); 714 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 715 if (phba->sli_rev < LPFC_SLI_REV4) 716 lpfc_set_loopback_flag(phba); 717 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 718 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 719 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 720 "0498 Adapter failed to init, mbxCmd x%x " 721 "INIT_LINK, mbxStatus x%x\n", 722 mb->mbxCommand, mb->mbxStatus); 723 if (phba->sli_rev <= LPFC_SLI_REV3) { 724 /* Clear all interrupt enable conditions */ 725 writel(0, phba->HCregaddr); 726 readl(phba->HCregaddr); /* flush */ 727 /* Clear all pending interrupts */ 728 writel(0xffffffff, phba->HAregaddr); 729 readl(phba->HAregaddr); /* flush */ 730 } 731 phba->link_state = LPFC_HBA_ERROR; 732 if (rc != MBX_BUSY || flag == MBX_POLL) 733 mempool_free(pmb, phba->mbox_mem_pool); 734 return -EIO; 735 } 736 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; 737 if (flag == MBX_POLL) 738 mempool_free(pmb, phba->mbox_mem_pool); 739 740 return 0; 741 } 742 743 /** 744 * lpfc_hba_down_link - this routine downs the FC link 745 * @phba: pointer to lpfc hba data structure. 746 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT 747 * 748 * This routine will issue the DOWN_LINK mailbox command call. 
749 * It is available to other drivers through the lpfc_hba data 750 * structure for use to stop the link. 751 * 752 * Return code 753 * 0 - success 754 * Any other value - error 755 **/ 756 static int 757 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag) 758 { 759 LPFC_MBOXQ_t *pmb; 760 int rc; 761 762 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 763 if (!pmb) { 764 phba->link_state = LPFC_HBA_ERROR; 765 return -ENOMEM; 766 } 767 768 lpfc_printf_log(phba, 769 KERN_ERR, LOG_INIT, 770 "0491 Adapter Link is disabled.\n"); 771 lpfc_down_link(phba, pmb); 772 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 773 rc = lpfc_sli_issue_mbox(phba, pmb, flag); 774 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { 775 lpfc_printf_log(phba, 776 KERN_ERR, LOG_INIT, 777 "2522 Adapter failed to issue DOWN_LINK" 778 " mbox command rc 0x%x\n", rc); 779 780 mempool_free(pmb, phba->mbox_mem_pool); 781 return -EIO; 782 } 783 if (flag == MBX_POLL) 784 mempool_free(pmb, phba->mbox_mem_pool); 785 786 return 0; 787 } 788 789 /** 790 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset 791 * @phba: pointer to lpfc HBA data structure. 792 * 793 * This routine will do LPFC uninitialization before the HBA is reset when 794 * bringing down the SLI Layer. 795 * 796 * Return codes 797 * 0 - success. 798 * Any other value - error. 799 **/ 800 int 801 lpfc_hba_down_prep(struct lpfc_hba *phba) 802 { 803 struct lpfc_vport **vports; 804 int i; 805 806 if (phba->sli_rev <= LPFC_SLI_REV3) { 807 /* Disable interrupts */ 808 writel(0, phba->HCregaddr); 809 readl(phba->HCregaddr); /* flush */ 810 } 811 812 if (phba->pport->load_flag & FC_UNLOADING) 813 lpfc_cleanup_discovery_resources(phba->pport); 814 else { 815 vports = lpfc_create_vport_work_array(phba); 816 if (vports != NULL) 817 for (i = 0; i <= phba->max_vports && 818 vports[i] != NULL; i++) 819 lpfc_cleanup_discovery_resources(vports[i]); 820 lpfc_destroy_vport_work_array(phba, vports); 821 } 822 return 0; 823 } 824 825 /** 826 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free 827 * rspiocb which got deferred 828 * 829 * @phba: pointer to lpfc HBA data structure. 830 * 831 * This routine will cleanup completed slow path events after HBA is reset 832 * when bringing down the SLI Layer. 833 * 834 * 835 * Return codes 836 * void. 837 **/ 838 static void 839 lpfc_sli4_free_sp_events(struct lpfc_hba *phba) 840 { 841 struct lpfc_iocbq *rspiocbq; 842 struct hbq_dmabuf *dmabuf; 843 struct lpfc_cq_event *cq_event; 844 845 spin_lock_irq(&phba->hbalock); 846 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 847 spin_unlock_irq(&phba->hbalock); 848 849 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 850 /* Get the response iocb from the head of work queue */ 851 spin_lock_irq(&phba->hbalock); 852 list_remove_head(&phba->sli4_hba.sp_queue_event, 853 cq_event, struct lpfc_cq_event, list); 854 spin_unlock_irq(&phba->hbalock); 855 856 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 857 case CQE_CODE_COMPL_WQE: 858 rspiocbq = container_of(cq_event, struct lpfc_iocbq, 859 cq_event); 860 lpfc_sli_release_iocbq(phba, rspiocbq); 861 break; 862 case CQE_CODE_RECEIVE: 863 case CQE_CODE_RECEIVE_V1: 864 dmabuf = container_of(cq_event, struct hbq_dmabuf, 865 cq_event); 866 lpfc_in_buf_free(phba, &dmabuf->dbuf); 867 } 868 } 869 } 870 871 /** 872 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset 873 * @phba: pointer to lpfc HBA data structure. 
 *
 * This routine will cleanup posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}

/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		if (phba->sli_rev >= LPFC_SLI_REV4)
			spin_lock_irq(&pring->ring_lock);
		else
			spin_lock_irq(&phba->hbalock);
		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;

		if (phba->sli_rev >= LPFC_SLI_REV4)
			spin_unlock_irq(&pring->ring_lock);
		else
			spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	pring = &psli->ring[LPFC_ELS_RING];

	/* At this point in time the HBA is either reset or DOA.
Either 1002 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be 1003 * on the lpfc_sgl_list so that it can either be freed if the 1004 * driver is unloading or reposted if the driver is restarting 1005 * the port. 1006 */ 1007 spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */ 1008 /* scsl_buf_list */ 1009 /* abts_sgl_list_lock required because worker thread uses this 1010 * list. 1011 */ 1012 spin_lock(&phba->sli4_hba.abts_sgl_list_lock); 1013 list_for_each_entry(sglq_entry, 1014 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) 1015 sglq_entry->state = SGL_FREED; 1016 1017 spin_lock(&pring->ring_lock); 1018 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, 1019 &phba->sli4_hba.lpfc_sgl_list); 1020 spin_unlock(&pring->ring_lock); 1021 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 1022 /* abts_scsi_buf_list_lock required because worker thread uses this 1023 * list. 1024 */ 1025 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); 1026 list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list, 1027 &aborts); 1028 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); 1029 spin_unlock_irq(&phba->hbalock); 1030 1031 list_for_each_entry_safe(psb, psb_next, &aborts, list) { 1032 psb->pCmd = NULL; 1033 psb->status = IOSTAT_SUCCESS; 1034 } 1035 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); 1036 list_splice(&aborts, &phba->lpfc_scsi_buf_list_put); 1037 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); 1038 1039 lpfc_sli4_free_sp_events(phba); 1040 return 0; 1041 } 1042 1043 /** 1044 * lpfc_hba_down_post - Wrapper func for hba down post routine 1045 * @phba: pointer to lpfc HBA data structure. 1046 * 1047 * This routine wraps the actual SLI3 or SLI4 routine for performing 1048 * uninitialization after the HBA is reset when bring down the SLI Layer. 1049 * 1050 * Return codes 1051 * 0 - success. 1052 * Any other value - error. 1053 **/ 1054 int 1055 lpfc_hba_down_post(struct lpfc_hba *phba) 1056 { 1057 return (*phba->lpfc_hba_down_post)(phba); 1058 } 1059 1060 /** 1061 * lpfc_hb_timeout - The HBA-timer timeout handler 1062 * @ptr: unsigned long holds the pointer to lpfc hba data structure. 1063 * 1064 * This is the HBA-timer timeout handler registered to the lpfc driver. When 1065 * this timer fires, a HBA timeout event shall be posted to the lpfc driver 1066 * work-port-events bitmap and the worker thread is notified. This timeout 1067 * event will be used by the worker thread to invoke the actual timeout 1068 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will 1069 * be performed in the timeout handler and the HBA timeout event bit shall 1070 * be cleared by the worker thread after it has taken the event bitmap out. 1071 **/ 1072 static void 1073 lpfc_hb_timeout(unsigned long ptr) 1074 { 1075 struct lpfc_hba *phba; 1076 uint32_t tmo_posted; 1077 unsigned long iflag; 1078 1079 phba = (struct lpfc_hba *)ptr; 1080 1081 /* Check for heart beat timeout conditions */ 1082 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1083 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO; 1084 if (!tmo_posted) 1085 phba->pport->work_port_events |= WORKER_HB_TMO; 1086 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1087 1088 /* Tell the worker thread there is work to do */ 1089 if (!tmo_posted) 1090 lpfc_worker_wake_up(phba); 1091 return; 1092 } 1093 1094 /** 1095 * lpfc_rrq_timeout - The RRQ-timer timeout handler 1096 * @ptr: unsigned long holds the pointer to lpfc hba data structure. 
1097 * 1098 * This is the RRQ-timer timeout handler registered to the lpfc driver. When 1099 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver 1100 * work-port-events bitmap and the worker thread is notified. This timeout 1101 * event will be used by the worker thread to invoke the actual timeout 1102 * handler routine, lpfc_rrq_handler. Any periodical operations will 1103 * be performed in the timeout handler and the RRQ timeout event bit shall 1104 * be cleared by the worker thread after it has taken the event bitmap out. 1105 **/ 1106 static void 1107 lpfc_rrq_timeout(unsigned long ptr) 1108 { 1109 struct lpfc_hba *phba; 1110 unsigned long iflag; 1111 1112 phba = (struct lpfc_hba *)ptr; 1113 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 1114 if (!(phba->pport->load_flag & FC_UNLOADING)) 1115 phba->hba_flag |= HBA_RRQ_ACTIVE; 1116 else 1117 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 1118 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 1119 1120 if (!(phba->pport->load_flag & FC_UNLOADING)) 1121 lpfc_worker_wake_up(phba); 1122 } 1123 1124 /** 1125 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function 1126 * @phba: pointer to lpfc hba data structure. 1127 * @pmboxq: pointer to the driver internal queue element for mailbox command. 1128 * 1129 * This is the callback function to the lpfc heart-beat mailbox command. 1130 * If configured, the lpfc driver issues the heart-beat mailbox command to 1131 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the 1132 * heart-beat mailbox command is issued, the driver shall set up heart-beat 1133 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks 1134 * heart-beat outstanding state. Once the mailbox command comes back and 1135 * no error conditions detected, the heart-beat mailbox command timer is 1136 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding 1137 * state is cleared for the next heart-beat. If the timer expired with the 1138 * heart-beat outstanding state set, the driver will put the HBA offline. 1139 **/ 1140 static void 1141 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 1142 { 1143 unsigned long drvr_flag; 1144 1145 spin_lock_irqsave(&phba->hbalock, drvr_flag); 1146 phba->hb_outstanding = 0; 1147 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 1148 1149 /* Check and reset heart-beat timer is necessary */ 1150 mempool_free(pmboxq, phba->mbox_mem_pool); 1151 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) && 1152 !(phba->link_state == LPFC_HBA_ERROR) && 1153 !(phba->pport->load_flag & FC_UNLOADING)) 1154 mod_timer(&phba->hb_tmofunc, 1155 jiffies + 1156 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1157 return; 1158 } 1159 1160 /** 1161 * lpfc_hb_timeout_handler - The HBA-timer timeout handler 1162 * @phba: pointer to lpfc hba data structure. 1163 * 1164 * This is the actual HBA-timer timeout handler to be invoked by the worker 1165 * thread whenever the HBA timer fired and HBA-timeout event posted. This 1166 * handler performs any periodic operations needed for the device. If such 1167 * periodic event has already been attended to either in the interrupt handler 1168 * or by processing slow-ring or fast-ring events within the HBA-timer 1169 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets 1170 * the timer for the next timeout period. 
If lpfc heart-beat mailbox command 1171 * is configured and there is no heart-beat mailbox command outstanding, a 1172 * heart-beat mailbox is issued and timer set properly. Otherwise, if there 1173 * has been a heart-beat mailbox command outstanding, the HBA shall be put 1174 * to offline. 1175 **/ 1176 void 1177 lpfc_hb_timeout_handler(struct lpfc_hba *phba) 1178 { 1179 struct lpfc_vport **vports; 1180 LPFC_MBOXQ_t *pmboxq; 1181 struct lpfc_dmabuf *buf_ptr; 1182 int retval, i; 1183 struct lpfc_sli *psli = &phba->sli; 1184 LIST_HEAD(completions); 1185 1186 vports = lpfc_create_vport_work_array(phba); 1187 if (vports != NULL) 1188 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 1189 lpfc_rcv_seq_check_edtov(vports[i]); 1190 lpfc_fdmi_num_disc_check(vports[i]); 1191 } 1192 lpfc_destroy_vport_work_array(phba, vports); 1193 1194 if ((phba->link_state == LPFC_HBA_ERROR) || 1195 (phba->pport->load_flag & FC_UNLOADING) || 1196 (phba->pport->fc_flag & FC_OFFLINE_MODE)) 1197 return; 1198 1199 spin_lock_irq(&phba->pport->work_port_lock); 1200 1201 if (time_after(phba->last_completion_time + 1202 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), 1203 jiffies)) { 1204 spin_unlock_irq(&phba->pport->work_port_lock); 1205 if (!phba->hb_outstanding) 1206 mod_timer(&phba->hb_tmofunc, 1207 jiffies + 1208 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 1209 else 1210 mod_timer(&phba->hb_tmofunc, 1211 jiffies + 1212 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1213 return; 1214 } 1215 spin_unlock_irq(&phba->pport->work_port_lock); 1216 1217 if (phba->elsbuf_cnt && 1218 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) { 1219 spin_lock_irq(&phba->hbalock); 1220 list_splice_init(&phba->elsbuf, &completions); 1221 phba->elsbuf_cnt = 0; 1222 phba->elsbuf_prev_cnt = 0; 1223 spin_unlock_irq(&phba->hbalock); 1224 1225 while (!list_empty(&completions)) { 1226 list_remove_head(&completions, buf_ptr, 1227 struct lpfc_dmabuf, list); 1228 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 1229 kfree(buf_ptr); 1230 } 1231 } 1232 phba->elsbuf_prev_cnt = phba->elsbuf_cnt; 1233 1234 /* If there is no heart beat outstanding, issue a heartbeat command */ 1235 if (phba->cfg_enable_hba_heartbeat) { 1236 if (!phba->hb_outstanding) { 1237 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) && 1238 (list_empty(&psli->mboxq))) { 1239 pmboxq = mempool_alloc(phba->mbox_mem_pool, 1240 GFP_KERNEL); 1241 if (!pmboxq) { 1242 mod_timer(&phba->hb_tmofunc, 1243 jiffies + 1244 msecs_to_jiffies(1000 * 1245 LPFC_HB_MBOX_INTERVAL)); 1246 return; 1247 } 1248 1249 lpfc_heart_beat(phba, pmboxq); 1250 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl; 1251 pmboxq->vport = phba->pport; 1252 retval = lpfc_sli_issue_mbox(phba, pmboxq, 1253 MBX_NOWAIT); 1254 1255 if (retval != MBX_BUSY && 1256 retval != MBX_SUCCESS) { 1257 mempool_free(pmboxq, 1258 phba->mbox_mem_pool); 1259 mod_timer(&phba->hb_tmofunc, 1260 jiffies + 1261 msecs_to_jiffies(1000 * 1262 LPFC_HB_MBOX_INTERVAL)); 1263 return; 1264 } 1265 phba->skipped_hb = 0; 1266 phba->hb_outstanding = 1; 1267 } else if (time_before_eq(phba->last_completion_time, 1268 phba->skipped_hb)) { 1269 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1270 "2857 Last completion time not " 1271 " updated in %d ms\n", 1272 jiffies_to_msecs(jiffies 1273 - phba->last_completion_time)); 1274 } else 1275 phba->skipped_hb = jiffies; 1276 1277 mod_timer(&phba->hb_tmofunc, 1278 jiffies + 1279 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); 1280 return; 1281 } else { 1282 /* 1283 * If heart beat timeout called with hb_outstanding set 
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still "
					"outstanding: last compl time was %d ms.\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		}
	} else {
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	}
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_HBA_ERROR;
	spin_unlock_irq(&phba->hbalock);

	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_offline(phba);
	lpfc_hba_down_post(phba);
	lpfc_unblock_mgmt_io(phba);
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA by setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggered the error attention. That could
	 * cause I/Os to be dropped by the firmware. Error out the iocbs
	 * (I/Os) on the txcmplq and let the SCSI layer retry them after
	 * re-establishing the link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the first
	 * write to the host attention register clears the host status
	 * register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggered the error attention with
		 * HS_FFER6. That could cause I/Os to be dropped by the
		 * firmware. Error out the iocbs (I/Os) on the txcmplq and
		 * let the SCSI layer retry them after re-establishing the
		 * link.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
1555 */ 1556 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1557 "0457 Adapter Hardware Error " 1558 "Data: x%x x%x x%x\n", 1559 phba->work_hs, 1560 phba->work_status[0], phba->work_status[1]); 1561 1562 event_data = FC_REG_DUMP_EVENT; 1563 shost = lpfc_shost_from_vport(vport); 1564 fc_host_post_vendor_event(shost, fc_get_event_number(), 1565 sizeof(event_data), (char *) &event_data, 1566 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1567 1568 lpfc_offline_eratt(phba); 1569 } 1570 return; 1571 } 1572 1573 /** 1574 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg 1575 * @phba: pointer to lpfc hba data structure. 1576 * @mbx_action: flag for mailbox shutdown action. 1577 * 1578 * This routine is invoked to perform an SLI4 port PCI function reset in 1579 * response to port status register polling attention. It waits for port 1580 * status register (ERR, RDY, RN) bits before proceeding with function reset. 1581 * During this process, interrupt vectors are freed and later requested 1582 * for handling possible port resource change. 1583 **/ 1584 static int 1585 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, 1586 bool en_rn_msg) 1587 { 1588 int rc; 1589 uint32_t intr_mode; 1590 1591 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 1592 LPFC_SLI_INTF_IF_TYPE_2) { 1593 /* 1594 * On error status condition, driver need to wait for port 1595 * ready before performing reset. 1596 */ 1597 rc = lpfc_sli4_pdev_status_reg_wait(phba); 1598 if (rc) 1599 return rc; 1600 } 1601 1602 /* need reset: attempt for port recovery */ 1603 if (en_rn_msg) 1604 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1605 "2887 Reset Needed: Attempting Port " 1606 "Recovery...\n"); 1607 lpfc_offline_prep(phba, mbx_action); 1608 lpfc_offline(phba); 1609 /* release interrupt for possible resource change */ 1610 lpfc_sli4_disable_intr(phba); 1611 lpfc_sli_brdrestart(phba); 1612 /* request and enable interrupt */ 1613 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 1614 if (intr_mode == LPFC_INTR_ERROR) { 1615 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1616 "3175 Failed to enable interrupt\n"); 1617 return -EIO; 1618 } 1619 phba->intr_mode = intr_mode; 1620 rc = lpfc_online(phba); 1621 if (rc == 0) 1622 lpfc_unblock_mgmt_io(phba); 1623 1624 return rc; 1625 } 1626 1627 /** 1628 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler 1629 * @phba: pointer to lpfc hba data structure. 1630 * 1631 * This routine is invoked to handle the SLI4 HBA hardware error attention 1632 * conditions. 1633 **/ 1634 static void 1635 lpfc_handle_eratt_s4(struct lpfc_hba *phba) 1636 { 1637 struct lpfc_vport *vport = phba->pport; 1638 uint32_t event_data; 1639 struct Scsi_Host *shost; 1640 uint32_t if_type; 1641 struct lpfc_register portstat_reg = {0}; 1642 uint32_t reg_err1, reg_err2; 1643 uint32_t uerrlo_reg, uemasklo_reg; 1644 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2; 1645 bool en_rn_msg = true; 1646 struct temp_event temp_event_data; 1647 struct lpfc_register portsmphr_reg; 1648 int rc, i; 1649 1650 /* If the pci channel is offline, ignore possible errors, since 1651 * we cannot communicate with the pci card anyway. 
1652 */ 1653 if (pci_channel_offline(phba->pcidev)) 1654 return; 1655 1656 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 1657 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 1658 switch (if_type) { 1659 case LPFC_SLI_INTF_IF_TYPE_0: 1660 pci_rd_rc1 = lpfc_readl( 1661 phba->sli4_hba.u.if_type0.UERRLOregaddr, 1662 &uerrlo_reg); 1663 pci_rd_rc2 = lpfc_readl( 1664 phba->sli4_hba.u.if_type0.UEMASKLOregaddr, 1665 &uemasklo_reg); 1666 /* consider PCI bus read error as pci_channel_offline */ 1667 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO) 1668 return; 1669 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) { 1670 lpfc_sli4_offline_eratt(phba); 1671 return; 1672 } 1673 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1674 "7623 Checking UE recoverable"); 1675 1676 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) { 1677 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 1678 &portsmphr_reg.word0)) 1679 continue; 1680 1681 smphr_port_status = bf_get(lpfc_port_smphr_port_status, 1682 &portsmphr_reg); 1683 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 1684 LPFC_PORT_SEM_UE_RECOVERABLE) 1685 break; 1686 /*Sleep for 1Sec, before checking SEMAPHORE */ 1687 msleep(1000); 1688 } 1689 1690 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1691 "4827 smphr_port_status x%x : Waited %dSec", 1692 smphr_port_status, i); 1693 1694 /* Recoverable UE, reset the HBA device */ 1695 if ((smphr_port_status & LPFC_PORT_SEM_MASK) == 1696 LPFC_PORT_SEM_UE_RECOVERABLE) { 1697 for (i = 0; i < 20; i++) { 1698 msleep(1000); 1699 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 1700 &portsmphr_reg.word0) && 1701 (LPFC_POST_STAGE_PORT_READY == 1702 bf_get(lpfc_port_smphr_port_status, 1703 &portsmphr_reg))) { 1704 rc = lpfc_sli4_port_sta_fn_reset(phba, 1705 LPFC_MBX_NO_WAIT, en_rn_msg); 1706 if (rc == 0) 1707 return; 1708 lpfc_printf_log(phba, 1709 KERN_ERR, LOG_INIT, 1710 "4215 Failed to recover UE"); 1711 break; 1712 } 1713 } 1714 } 1715 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1716 "7624 Firmware not ready: Failing UE recovery," 1717 " waited %dSec", i); 1718 lpfc_sli4_offline_eratt(phba); 1719 break; 1720 1721 case LPFC_SLI_INTF_IF_TYPE_2: 1722 pci_rd_rc1 = lpfc_readl( 1723 phba->sli4_hba.u.if_type2.STATUSregaddr, 1724 &portstat_reg.word0); 1725 /* consider PCI bus read error as pci_channel_offline */ 1726 if (pci_rd_rc1 == -EIO) { 1727 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1728 "3151 PCI bus read access failure: x%x\n", 1729 readl(phba->sli4_hba.u.if_type2.STATUSregaddr)); 1730 return; 1731 } 1732 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 1733 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 1734 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) { 1735 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1736 "2889 Port Overtemperature event, " 1737 "taking port offline Data: x%x x%x\n", 1738 reg_err1, reg_err2); 1739 1740 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 1741 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 1742 temp_event_data.event_code = LPFC_CRIT_TEMP; 1743 temp_event_data.data = 0xFFFFFFFF; 1744 1745 shost = lpfc_shost_from_vport(phba->pport); 1746 fc_host_post_vendor_event(shost, fc_get_event_number(), 1747 sizeof(temp_event_data), 1748 (char *)&temp_event_data, 1749 SCSI_NL_VID_TYPE_PCI 1750 | PCI_VENDOR_ID_EMULEX); 1751 1752 spin_lock_irq(&phba->hbalock); 1753 phba->over_temp_state = HBA_OVER_TEMP; 1754 spin_unlock_irq(&phba->hbalock); 1755 lpfc_sli4_offline_eratt(phba); 1756 return; 1757 } 1758 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1759 reg_err2 == 
SLIPORT_ERR2_REG_FW_RESTART) { 1760 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1761 "3143 Port Down: Firmware Update " 1762 "Detected\n"); 1763 en_rn_msg = false; 1764 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1765 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 1766 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1767 "3144 Port Down: Debug Dump\n"); 1768 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1769 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON) 1770 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1771 "3145 Port Down: Provisioning\n"); 1772 1773 /* If resets are disabled then leave the HBA alone and return */ 1774 if (!phba->cfg_enable_hba_reset) 1775 return; 1776 1777 /* Check port status register for function reset */ 1778 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, 1779 en_rn_msg); 1780 if (rc == 0) { 1781 /* don't report event on forced debug dump */ 1782 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && 1783 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) 1784 return; 1785 else 1786 break; 1787 } 1788 /* fall through for not able to recover */ 1789 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1790 "3152 Unrecoverable error, bring the port " 1791 "offline\n"); 1792 lpfc_sli4_offline_eratt(phba); 1793 break; 1794 case LPFC_SLI_INTF_IF_TYPE_1: 1795 default: 1796 break; 1797 } 1798 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 1799 "3123 Report dump event to upper layer\n"); 1800 /* Send an internal error event to mgmt application */ 1801 lpfc_board_errevt_to_mgmt(phba); 1802 1803 event_data = FC_REG_DUMP_EVENT; 1804 shost = lpfc_shost_from_vport(vport); 1805 fc_host_post_vendor_event(shost, fc_get_event_number(), 1806 sizeof(event_data), (char *) &event_data, 1807 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 1808 } 1809 1810 /** 1811 * lpfc_handle_eratt - Wrapper func for handling hba error attention 1812 * @phba: pointer to lpfc HBA data structure. 1813 * 1814 * This routine wraps the actual SLI3 or SLI4 hba error attention handling 1815 * routine from the API jump table function pointer from the lpfc_hba struct. 1816 * 1817 * Return codes 1818 * 0 - success. 1819 * Any other value - error. 1820 **/ 1821 void 1822 lpfc_handle_eratt(struct lpfc_hba *phba) 1823 { 1824 (*phba->lpfc_handle_eratt)(phba); 1825 } 1826 1827 /** 1828 * lpfc_handle_latt - The HBA link event handler 1829 * @phba: pointer to lpfc hba data structure. 1830 * 1831 * This routine is invoked from the worker thread to handle a HBA host 1832 * attention link event. 
1833 **/ 1834 void 1835 lpfc_handle_latt(struct lpfc_hba *phba) 1836 { 1837 struct lpfc_vport *vport = phba->pport; 1838 struct lpfc_sli *psli = &phba->sli; 1839 LPFC_MBOXQ_t *pmb; 1840 volatile uint32_t control; 1841 struct lpfc_dmabuf *mp; 1842 int rc = 0; 1843 1844 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1845 if (!pmb) { 1846 rc = 1; 1847 goto lpfc_handle_latt_err_exit; 1848 } 1849 1850 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 1851 if (!mp) { 1852 rc = 2; 1853 goto lpfc_handle_latt_free_pmb; 1854 } 1855 1856 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 1857 if (!mp->virt) { 1858 rc = 3; 1859 goto lpfc_handle_latt_free_mp; 1860 } 1861 1862 /* Cleanup any outstanding ELS commands */ 1863 lpfc_els_flush_all_cmd(phba); 1864 1865 psli->slistat.link_event++; 1866 lpfc_read_topology(phba, pmb, mp); 1867 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 1868 pmb->vport = vport; 1869 /* Block ELS IOCBs until we have processed this mbox command */ 1870 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 1871 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT); 1872 if (rc == MBX_NOT_FINISHED) { 1873 rc = 4; 1874 goto lpfc_handle_latt_free_mbuf; 1875 } 1876 1877 /* Clear Link Attention in HA REG */ 1878 spin_lock_irq(&phba->hbalock); 1879 writel(HA_LATT, phba->HAregaddr); 1880 readl(phba->HAregaddr); /* flush */ 1881 spin_unlock_irq(&phba->hbalock); 1882 1883 return; 1884 1885 lpfc_handle_latt_free_mbuf: 1886 phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 1887 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1888 lpfc_handle_latt_free_mp: 1889 kfree(mp); 1890 lpfc_handle_latt_free_pmb: 1891 mempool_free(pmb, phba->mbox_mem_pool); 1892 lpfc_handle_latt_err_exit: 1893 /* Enable Link attention interrupts */ 1894 spin_lock_irq(&phba->hbalock); 1895 psli->sli_flag |= LPFC_PROCESS_LA; 1896 control = readl(phba->HCregaddr); 1897 control |= HC_LAINT_ENA; 1898 writel(control, phba->HCregaddr); 1899 readl(phba->HCregaddr); /* flush */ 1900 1901 /* Clear Link Attention in HA REG */ 1902 writel(HA_LATT, phba->HAregaddr); 1903 readl(phba->HAregaddr); /* flush */ 1904 spin_unlock_irq(&phba->hbalock); 1905 lpfc_linkdown(phba); 1906 phba->link_state = LPFC_HBA_ERROR; 1907 1908 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 1909 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc); 1910 1911 return; 1912 } 1913 1914 /** 1915 * lpfc_parse_vpd - Parse VPD (Vital Product Data) 1916 * @phba: pointer to lpfc hba data structure. 1917 * @vpd: pointer to the vital product data. 1918 * @len: length of the vital product data in bytes. 1919 * 1920 * This routine parses the Vital Product Data (VPD). The VPD is treated as 1921 * an array of characters. In this routine, the ModelName, ProgramType, and 1922 * ModelDesc, etc. fields of the phba data structure will be populated. 
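 *
 * For reference, a rough sketch of the descriptor layout the loop below
 * walks (illustrative only; tag values are the ones handled in the code,
 * lengths are 16-bit little-endian):
 *
 *	0x82 / 0x91  len_lo len_hi  <data>       identifier / read-write
 *	                                         area, skipped over
 *	0x90         len_lo len_hi  <keywords>   read-only area; each keyword
 *	                                         is 2 chars + 1 length byte +
 *	                                         data, e.g. "SN", "V1".."V4"
 *	0x78                                     end tag, parsing stops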
1923 * 1924 * Return codes 1925 * 0 - pointer to the VPD passed in is NULL 1926 * 1 - success 1927 **/ 1928 int 1929 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) 1930 { 1931 uint8_t lenlo, lenhi; 1932 int Length; 1933 int i, j; 1934 int finished = 0; 1935 int index = 0; 1936 1937 if (!vpd) 1938 return 0; 1939 1940 /* Vital Product */ 1941 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 1942 "0455 Vital Product Data: x%x x%x x%x x%x\n", 1943 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 1944 (uint32_t) vpd[3]); 1945 while (!finished && (index < (len - 4))) { 1946 switch (vpd[index]) { 1947 case 0x82: 1948 case 0x91: 1949 index += 1; 1950 lenlo = vpd[index]; 1951 index += 1; 1952 lenhi = vpd[index]; 1953 index += 1; 1954 i = ((((unsigned short)lenhi) << 8) + lenlo); 1955 index += i; 1956 break; 1957 case 0x90: 1958 index += 1; 1959 lenlo = vpd[index]; 1960 index += 1; 1961 lenhi = vpd[index]; 1962 index += 1; 1963 Length = ((((unsigned short)lenhi) << 8) + lenlo); 1964 if (Length > len - index) 1965 Length = len - index; 1966 while (Length > 0) { 1967 /* Look for Serial Number */ 1968 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) { 1969 index += 2; 1970 i = vpd[index]; 1971 index += 1; 1972 j = 0; 1973 Length -= (3+i); 1974 while(i--) { 1975 phba->SerialNumber[j++] = vpd[index++]; 1976 if (j == 31) 1977 break; 1978 } 1979 phba->SerialNumber[j] = 0; 1980 continue; 1981 } 1982 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) { 1983 phba->vpd_flag |= VPD_MODEL_DESC; 1984 index += 2; 1985 i = vpd[index]; 1986 index += 1; 1987 j = 0; 1988 Length -= (3+i); 1989 while(i--) { 1990 phba->ModelDesc[j++] = vpd[index++]; 1991 if (j == 255) 1992 break; 1993 } 1994 phba->ModelDesc[j] = 0; 1995 continue; 1996 } 1997 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) { 1998 phba->vpd_flag |= VPD_MODEL_NAME; 1999 index += 2; 2000 i = vpd[index]; 2001 index += 1; 2002 j = 0; 2003 Length -= (3+i); 2004 while(i--) { 2005 phba->ModelName[j++] = vpd[index++]; 2006 if (j == 79) 2007 break; 2008 } 2009 phba->ModelName[j] = 0; 2010 continue; 2011 } 2012 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) { 2013 phba->vpd_flag |= VPD_PROGRAM_TYPE; 2014 index += 2; 2015 i = vpd[index]; 2016 index += 1; 2017 j = 0; 2018 Length -= (3+i); 2019 while(i--) { 2020 phba->ProgramType[j++] = vpd[index++]; 2021 if (j == 255) 2022 break; 2023 } 2024 phba->ProgramType[j] = 0; 2025 continue; 2026 } 2027 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) { 2028 phba->vpd_flag |= VPD_PORT; 2029 index += 2; 2030 i = vpd[index]; 2031 index += 1; 2032 j = 0; 2033 Length -= (3+i); 2034 while(i--) { 2035 if ((phba->sli_rev == LPFC_SLI_REV4) && 2036 (phba->sli4_hba.pport_name_sta == 2037 LPFC_SLI4_PPNAME_GET)) { 2038 j++; 2039 index++; 2040 } else 2041 phba->Port[j++] = vpd[index++]; 2042 if (j == 19) 2043 break; 2044 } 2045 if ((phba->sli_rev != LPFC_SLI_REV4) || 2046 (phba->sli4_hba.pport_name_sta == 2047 LPFC_SLI4_PPNAME_NON)) 2048 phba->Port[j] = 0; 2049 continue; 2050 } 2051 else { 2052 index += 2; 2053 i = vpd[index]; 2054 index += 1; 2055 index += i; 2056 Length -= (3 + i); 2057 } 2058 } 2059 finished = 0; 2060 break; 2061 case 0x78: 2062 finished = 1; 2063 break; 2064 default: 2065 index ++; 2066 break; 2067 } 2068 } 2069 2070 return(1); 2071 } 2072 2073 /** 2074 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description 2075 * @phba: pointer to lpfc hba data structure. 2076 * @mdp: pointer to the data structure to hold the derived model name. 
2077 * @descp: pointer to the data structure to hold the derived description. 2078 * 2079 * This routine retrieves HBA's description based on its registered PCI device 2080 * ID. The @descp passed into this function points to an array of 256 chars. It 2081 * shall be returned with the model name, maximum speed, and the host bus type. 2082 * The @mdp passed into this function points to an array of 80 chars. When the 2083 * function returns, the @mdp will be filled with the model name. 2084 **/ 2085 static void 2086 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) 2087 { 2088 lpfc_vpd_t *vp; 2089 uint16_t dev_id = phba->pcidev->device; 2090 int max_speed; 2091 int GE = 0; 2092 int oneConnect = 0; /* default is not a oneConnect */ 2093 struct { 2094 char *name; 2095 char *bus; 2096 char *function; 2097 } m = {"<Unknown>", "", ""}; 2098 2099 if (mdp && mdp[0] != '\0' 2100 && descp && descp[0] != '\0') 2101 return; 2102 2103 if (phba->lmt & LMT_32Gb) 2104 max_speed = 32; 2105 else if (phba->lmt & LMT_16Gb) 2106 max_speed = 16; 2107 else if (phba->lmt & LMT_10Gb) 2108 max_speed = 10; 2109 else if (phba->lmt & LMT_8Gb) 2110 max_speed = 8; 2111 else if (phba->lmt & LMT_4Gb) 2112 max_speed = 4; 2113 else if (phba->lmt & LMT_2Gb) 2114 max_speed = 2; 2115 else if (phba->lmt & LMT_1Gb) 2116 max_speed = 1; 2117 else 2118 max_speed = 0; 2119 2120 vp = &phba->vpd; 2121 2122 switch (dev_id) { 2123 case PCI_DEVICE_ID_FIREFLY: 2124 m = (typeof(m)){"LP6000", "PCI", 2125 "Obsolete, Unsupported Fibre Channel Adapter"}; 2126 break; 2127 case PCI_DEVICE_ID_SUPERFLY: 2128 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) 2129 m = (typeof(m)){"LP7000", "PCI", ""}; 2130 else 2131 m = (typeof(m)){"LP7000E", "PCI", ""}; 2132 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2133 break; 2134 case PCI_DEVICE_ID_DRAGONFLY: 2135 m = (typeof(m)){"LP8000", "PCI", 2136 "Obsolete, Unsupported Fibre Channel Adapter"}; 2137 break; 2138 case PCI_DEVICE_ID_CENTAUR: 2139 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) 2140 m = (typeof(m)){"LP9002", "PCI", ""}; 2141 else 2142 m = (typeof(m)){"LP9000", "PCI", ""}; 2143 m.function = "Obsolete, Unsupported Fibre Channel Adapter"; 2144 break; 2145 case PCI_DEVICE_ID_RFLY: 2146 m = (typeof(m)){"LP952", "PCI", 2147 "Obsolete, Unsupported Fibre Channel Adapter"}; 2148 break; 2149 case PCI_DEVICE_ID_PEGASUS: 2150 m = (typeof(m)){"LP9802", "PCI-X", 2151 "Obsolete, Unsupported Fibre Channel Adapter"}; 2152 break; 2153 case PCI_DEVICE_ID_THOR: 2154 m = (typeof(m)){"LP10000", "PCI-X", 2155 "Obsolete, Unsupported Fibre Channel Adapter"}; 2156 break; 2157 case PCI_DEVICE_ID_VIPER: 2158 m = (typeof(m)){"LPX1000", "PCI-X", 2159 "Obsolete, Unsupported Fibre Channel Adapter"}; 2160 break; 2161 case PCI_DEVICE_ID_PFLY: 2162 m = (typeof(m)){"LP982", "PCI-X", 2163 "Obsolete, Unsupported Fibre Channel Adapter"}; 2164 break; 2165 case PCI_DEVICE_ID_TFLY: 2166 m = (typeof(m)){"LP1050", "PCI-X", 2167 "Obsolete, Unsupported Fibre Channel Adapter"}; 2168 break; 2169 case PCI_DEVICE_ID_HELIOS: 2170 m = (typeof(m)){"LP11000", "PCI-X2", 2171 "Obsolete, Unsupported Fibre Channel Adapter"}; 2172 break; 2173 case PCI_DEVICE_ID_HELIOS_SCSP: 2174 m = (typeof(m)){"LP11000-SP", "PCI-X2", 2175 "Obsolete, Unsupported Fibre Channel Adapter"}; 2176 break; 2177 case PCI_DEVICE_ID_HELIOS_DCSP: 2178 m = (typeof(m)){"LP11002-SP", "PCI-X2", 2179 "Obsolete, Unsupported Fibre Channel Adapter"}; 2180 break; 2181 case PCI_DEVICE_ID_NEPTUNE: 2182 m = (typeof(m)){"LPe1000", "PCIe", 2183 
"Obsolete, Unsupported Fibre Channel Adapter"}; 2184 break; 2185 case PCI_DEVICE_ID_NEPTUNE_SCSP: 2186 m = (typeof(m)){"LPe1000-SP", "PCIe", 2187 "Obsolete, Unsupported Fibre Channel Adapter"}; 2188 break; 2189 case PCI_DEVICE_ID_NEPTUNE_DCSP: 2190 m = (typeof(m)){"LPe1002-SP", "PCIe", 2191 "Obsolete, Unsupported Fibre Channel Adapter"}; 2192 break; 2193 case PCI_DEVICE_ID_BMID: 2194 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; 2195 break; 2196 case PCI_DEVICE_ID_BSMB: 2197 m = (typeof(m)){"LP111", "PCI-X2", 2198 "Obsolete, Unsupported Fibre Channel Adapter"}; 2199 break; 2200 case PCI_DEVICE_ID_ZEPHYR: 2201 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2202 break; 2203 case PCI_DEVICE_ID_ZEPHYR_SCSP: 2204 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; 2205 break; 2206 case PCI_DEVICE_ID_ZEPHYR_DCSP: 2207 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"}; 2208 GE = 1; 2209 break; 2210 case PCI_DEVICE_ID_ZMID: 2211 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"}; 2212 break; 2213 case PCI_DEVICE_ID_ZSMB: 2214 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; 2215 break; 2216 case PCI_DEVICE_ID_LP101: 2217 m = (typeof(m)){"LP101", "PCI-X", 2218 "Obsolete, Unsupported Fibre Channel Adapter"}; 2219 break; 2220 case PCI_DEVICE_ID_LP10000S: 2221 m = (typeof(m)){"LP10000-S", "PCI", 2222 "Obsolete, Unsupported Fibre Channel Adapter"}; 2223 break; 2224 case PCI_DEVICE_ID_LP11000S: 2225 m = (typeof(m)){"LP11000-S", "PCI-X2", 2226 "Obsolete, Unsupported Fibre Channel Adapter"}; 2227 break; 2228 case PCI_DEVICE_ID_LPE11000S: 2229 m = (typeof(m)){"LPe11000-S", "PCIe", 2230 "Obsolete, Unsupported Fibre Channel Adapter"}; 2231 break; 2232 case PCI_DEVICE_ID_SAT: 2233 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; 2234 break; 2235 case PCI_DEVICE_ID_SAT_MID: 2236 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"}; 2237 break; 2238 case PCI_DEVICE_ID_SAT_SMB: 2239 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"}; 2240 break; 2241 case PCI_DEVICE_ID_SAT_DCSP: 2242 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"}; 2243 break; 2244 case PCI_DEVICE_ID_SAT_SCSP: 2245 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"}; 2246 break; 2247 case PCI_DEVICE_ID_SAT_S: 2248 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; 2249 break; 2250 case PCI_DEVICE_ID_HORNET: 2251 m = (typeof(m)){"LP21000", "PCIe", 2252 "Obsolete, Unsupported FCoE Adapter"}; 2253 GE = 1; 2254 break; 2255 case PCI_DEVICE_ID_PROTEUS_VF: 2256 m = (typeof(m)){"LPev12000", "PCIe IOV", 2257 "Obsolete, Unsupported Fibre Channel Adapter"}; 2258 break; 2259 case PCI_DEVICE_ID_PROTEUS_PF: 2260 m = (typeof(m)){"LPev12000", "PCIe IOV", 2261 "Obsolete, Unsupported Fibre Channel Adapter"}; 2262 break; 2263 case PCI_DEVICE_ID_PROTEUS_S: 2264 m = (typeof(m)){"LPemv12002-S", "PCIe IOV", 2265 "Obsolete, Unsupported Fibre Channel Adapter"}; 2266 break; 2267 case PCI_DEVICE_ID_TIGERSHARK: 2268 oneConnect = 1; 2269 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"}; 2270 break; 2271 case PCI_DEVICE_ID_TOMCAT: 2272 oneConnect = 1; 2273 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"}; 2274 break; 2275 case PCI_DEVICE_ID_FALCON: 2276 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe", 2277 "EmulexSecure Fibre"}; 2278 break; 2279 case PCI_DEVICE_ID_BALIUS: 2280 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", 2281 "Obsolete, Unsupported Fibre Channel Adapter"}; 2282 break; 2283 case PCI_DEVICE_ID_LANCER_FC: 2284 m = (typeof(m)){"LPe16000", "PCIe", 
"Fibre Channel Adapter"}; 2285 break; 2286 case PCI_DEVICE_ID_LANCER_FC_VF: 2287 m = (typeof(m)){"LPe16000", "PCIe", 2288 "Obsolete, Unsupported Fibre Channel Adapter"}; 2289 break; 2290 case PCI_DEVICE_ID_LANCER_FCOE: 2291 oneConnect = 1; 2292 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; 2293 break; 2294 case PCI_DEVICE_ID_LANCER_FCOE_VF: 2295 oneConnect = 1; 2296 m = (typeof(m)){"OCe15100", "PCIe", 2297 "Obsolete, Unsupported FCoE"}; 2298 break; 2299 case PCI_DEVICE_ID_LANCER_G6_FC: 2300 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"}; 2301 break; 2302 case PCI_DEVICE_ID_SKYHAWK: 2303 case PCI_DEVICE_ID_SKYHAWK_VF: 2304 oneConnect = 1; 2305 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"}; 2306 break; 2307 default: 2308 m = (typeof(m)){"Unknown", "", ""}; 2309 break; 2310 } 2311 2312 if (mdp && mdp[0] == '\0') 2313 snprintf(mdp, 79,"%s", m.name); 2314 /* 2315 * oneConnect hba requires special processing, they are all initiators 2316 * and we put the port number on the end 2317 */ 2318 if (descp && descp[0] == '\0') { 2319 if (oneConnect) 2320 snprintf(descp, 255, 2321 "Emulex OneConnect %s, %s Initiator %s", 2322 m.name, m.function, 2323 phba->Port); 2324 else if (max_speed == 0) 2325 snprintf(descp, 255, 2326 "Emulex %s %s %s", 2327 m.name, m.bus, m.function); 2328 else 2329 snprintf(descp, 255, 2330 "Emulex %s %d%s %s %s", 2331 m.name, max_speed, (GE) ? "GE" : "Gb", 2332 m.bus, m.function); 2333 } 2334 } 2335 2336 /** 2337 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring 2338 * @phba: pointer to lpfc hba data structure. 2339 * @pring: pointer to a IOCB ring. 2340 * @cnt: the number of IOCBs to be posted to the IOCB ring. 2341 * 2342 * This routine posts a given number of IOCBs with the associated DMA buffer 2343 * descriptors specified by the cnt argument to the given IOCB ring. 2344 * 2345 * Return codes 2346 * The number of IOCBs NOT able to be posted to the IOCB ring. 
2347 **/ 2348 int 2349 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) 2350 { 2351 IOCB_t *icmd; 2352 struct lpfc_iocbq *iocb; 2353 struct lpfc_dmabuf *mp1, *mp2; 2354 2355 cnt += pring->missbufcnt; 2356 2357 /* While there are buffers to post */ 2358 while (cnt > 0) { 2359 /* Allocate buffer for command iocb */ 2360 iocb = lpfc_sli_get_iocbq(phba); 2361 if (iocb == NULL) { 2362 pring->missbufcnt = cnt; 2363 return cnt; 2364 } 2365 icmd = &iocb->iocb; 2366 2367 /* 2 buffers can be posted per command */ 2368 /* Allocate buffer to post */ 2369 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2370 if (mp1) 2371 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys); 2372 if (!mp1 || !mp1->virt) { 2373 kfree(mp1); 2374 lpfc_sli_release_iocbq(phba, iocb); 2375 pring->missbufcnt = cnt; 2376 return cnt; 2377 } 2378 2379 INIT_LIST_HEAD(&mp1->list); 2380 /* Allocate buffer to post */ 2381 if (cnt > 1) { 2382 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 2383 if (mp2) 2384 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 2385 &mp2->phys); 2386 if (!mp2 || !mp2->virt) { 2387 kfree(mp2); 2388 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2389 kfree(mp1); 2390 lpfc_sli_release_iocbq(phba, iocb); 2391 pring->missbufcnt = cnt; 2392 return cnt; 2393 } 2394 2395 INIT_LIST_HEAD(&mp2->list); 2396 } else { 2397 mp2 = NULL; 2398 } 2399 2400 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys); 2401 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys); 2402 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE; 2403 icmd->ulpBdeCount = 1; 2404 cnt--; 2405 if (mp2) { 2406 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys); 2407 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys); 2408 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE; 2409 cnt--; 2410 icmd->ulpBdeCount = 2; 2411 } 2412 2413 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 2414 icmd->ulpLe = 1; 2415 2416 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == 2417 IOCB_ERROR) { 2418 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 2419 kfree(mp1); 2420 cnt++; 2421 if (mp2) { 2422 lpfc_mbuf_free(phba, mp2->virt, mp2->phys); 2423 kfree(mp2); 2424 cnt++; 2425 } 2426 lpfc_sli_release_iocbq(phba, iocb); 2427 pring->missbufcnt = cnt; 2428 return cnt; 2429 } 2430 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 2431 if (mp2) 2432 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 2433 } 2434 pring->missbufcnt = 0; 2435 return 0; 2436 } 2437 2438 /** 2439 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring 2440 * @phba: pointer to lpfc hba data structure. 2441 * 2442 * This routine posts initial receive IOCB buffers to the ELS ring. The 2443 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is 2444 * set to 64 IOCBs. 2445 * 2446 * Return codes 2447 * 0 - success (currently always success) 2448 **/ 2449 static int 2450 lpfc_post_rcv_buf(struct lpfc_hba *phba) 2451 { 2452 struct lpfc_sli *psli = &phba->sli; 2453 2454 /* Ring 0, ELS / CT buffers */ 2455 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0); 2456 /* Ring 2 - FCP no buffers needed */ 2457 2458 return 0; 2459 } 2460 2461 #define S(N,V) (((V)<<(N))|((V)>>(32-(N)))) 2462 2463 /** 2464 * lpfc_sha_init - Set up initial array of hash table entries 2465 * @HashResultPointer: pointer to an array as hash table. 2466 * 2467 * This routine sets up the initial values to the array of hash table entries 2468 * for the LC HBAs. 
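 *
 * The five constants written here are the standard SHA-1 initial hash
 * values (H0..H4); together with the rotate-left macro S(N,V) defined
 * above and lpfc_sha_iterate() below, this forms a SHA-1 style digest
 * over the challenge data.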
2469 **/
2470 static void
2471 lpfc_sha_init(uint32_t * HashResultPointer)
2472 {
2473 HashResultPointer[0] = 0x67452301;
2474 HashResultPointer[1] = 0xEFCDAB89;
2475 HashResultPointer[2] = 0x98BADCFE;
2476 HashResultPointer[3] = 0x10325476;
2477 HashResultPointer[4] = 0xC3D2E1F0;
2478 }
2479
2480 /**
2481 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2482 * @HashResultPointer: pointer to an initial/result hash table.
2483 * @HashWorkingPointer: pointer to a working hash table.
2484 *
2485 * This routine iterates an initial hash table pointed to by @HashResultPointer
2486 * with the values from the working hash table pointed to by @HashWorkingPointer.
2487 * The results are put back into the initial hash table and returned through
2488 * the @HashResultPointer as the result hash table.
2489 **/
2490 static void
2491 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2492 {
2493 int t;
2494 uint32_t TEMP;
2495 uint32_t A, B, C, D, E;
2496 t = 16;
2497 do {
2498 HashWorkingPointer[t] =
2499 S(1,
2500 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2501 8] ^
2502 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2503 } while (++t <= 79);
2504 t = 0;
2505 A = HashResultPointer[0];
2506 B = HashResultPointer[1];
2507 C = HashResultPointer[2];
2508 D = HashResultPointer[3];
2509 E = HashResultPointer[4];
2510
2511 do {
2512 if (t < 20) {
2513 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2514 } else if (t < 40) {
2515 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2516 } else if (t < 60) {
2517 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2518 } else {
2519 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2520 }
2521 TEMP += S(5, A) + E + HashWorkingPointer[t];
2522 E = D;
2523 D = C;
2524 C = S(30, B);
2525 B = A;
2526 A = TEMP;
2527 } while (++t <= 79);
2528
2529 HashResultPointer[0] += A;
2530 HashResultPointer[1] += B;
2531 HashResultPointer[2] += C;
2532 HashResultPointer[3] += D;
2533 HashResultPointer[4] += E;
2534
2535 }
2536
2537 /**
2538 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2539 * @RandomChallenge: pointer to the entry of host challenge random number array.
2540 * @HashWorking: pointer to the entry of the working hash array.
2541 *
2542 * This routine calculates the working hash array referred to by @HashWorking
2543 * from the challenge random numbers associated with the host, referred to by
2544 * @RandomChallenge. The result is put into the entry of the working hash
2545 * array and returned by reference through @HashWorking.
2546 **/
2547 static void
2548 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2549 {
2550 *HashWorking = (*RandomChallenge ^ *HashWorking);
2551 }
2552
2553 /**
2554 * lpfc_hba_init - Perform special handling for LC HBA initialization
2555 * @phba: pointer to lpfc hba data structure.
2556 * @hbainit: pointer to an array of unsigned 32-bit integers.
2557 *
2558 * This routine performs the special handling for LC HBA initialization.
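 *
 * A rough sketch of the flow implemented below (summary, not new
 * behaviour): the 8-byte WWNN seeds HashWorking[0]/[1] and [78]/[79],
 * the first seven working words are XORed with the RandomData challenge
 * via lpfc_challenge_key(), and the result is run through the
 * lpfc_sha_init()/lpfc_sha_iterate() pair to produce the @hbainit words
 * returned to the caller.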
2559 **/ 2560 void 2561 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit) 2562 { 2563 int t; 2564 uint32_t *HashWorking; 2565 uint32_t *pwwnn = (uint32_t *) phba->wwnn; 2566 2567 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL); 2568 if (!HashWorking) 2569 return; 2570 2571 HashWorking[0] = HashWorking[78] = *pwwnn++; 2572 HashWorking[1] = HashWorking[79] = *pwwnn; 2573 2574 for (t = 0; t < 7; t++) 2575 lpfc_challenge_key(phba->RandomData + t, HashWorking + t); 2576 2577 lpfc_sha_init(hbainit); 2578 lpfc_sha_iterate(hbainit, HashWorking); 2579 kfree(HashWorking); 2580 } 2581 2582 /** 2583 * lpfc_cleanup - Performs vport cleanups before deleting a vport 2584 * @vport: pointer to a virtual N_Port data structure. 2585 * 2586 * This routine performs the necessary cleanups before deleting the @vport. 2587 * It invokes the discovery state machine to perform necessary state 2588 * transitions and to release the ndlps associated with the @vport. Note, 2589 * the physical port is treated as @vport 0. 2590 **/ 2591 void 2592 lpfc_cleanup(struct lpfc_vport *vport) 2593 { 2594 struct lpfc_hba *phba = vport->phba; 2595 struct lpfc_nodelist *ndlp, *next_ndlp; 2596 int i = 0; 2597 2598 if (phba->link_state > LPFC_LINK_DOWN) 2599 lpfc_port_link_failure(vport); 2600 2601 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2602 if (!NLP_CHK_NODE_ACT(ndlp)) { 2603 ndlp = lpfc_enable_node(vport, ndlp, 2604 NLP_STE_UNUSED_NODE); 2605 if (!ndlp) 2606 continue; 2607 spin_lock_irq(&phba->ndlp_lock); 2608 NLP_SET_FREE_REQ(ndlp); 2609 spin_unlock_irq(&phba->ndlp_lock); 2610 /* Trigger the release of the ndlp memory */ 2611 lpfc_nlp_put(ndlp); 2612 continue; 2613 } 2614 spin_lock_irq(&phba->ndlp_lock); 2615 if (NLP_CHK_FREE_REQ(ndlp)) { 2616 /* The ndlp should not be in memory free mode already */ 2617 spin_unlock_irq(&phba->ndlp_lock); 2618 continue; 2619 } else 2620 /* Indicate request for freeing ndlp memory */ 2621 NLP_SET_FREE_REQ(ndlp); 2622 spin_unlock_irq(&phba->ndlp_lock); 2623 2624 if (vport->port_type != LPFC_PHYSICAL_PORT && 2625 ndlp->nlp_DID == Fabric_DID) { 2626 /* Just free up ndlp with Fabric_DID for vports */ 2627 lpfc_nlp_put(ndlp); 2628 continue; 2629 } 2630 2631 /* take care of nodes in unused state before the state 2632 * machine taking action. 2633 */ 2634 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 2635 lpfc_nlp_put(ndlp); 2636 continue; 2637 } 2638 2639 if (ndlp->nlp_type & NLP_FABRIC) 2640 lpfc_disc_state_machine(vport, ndlp, NULL, 2641 NLP_EVT_DEVICE_RECOVERY); 2642 2643 lpfc_disc_state_machine(vport, ndlp, NULL, 2644 NLP_EVT_DEVICE_RM); 2645 } 2646 2647 /* At this point, ALL ndlp's should be gone 2648 * because of the previous NLP_EVT_DEVICE_RM. 2649 * Lets wait for this to happen, if needed. 
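 * The loop below polls every 10 ms and gives up after roughly 30 seconds
 * (3000 iterations), logging the DID, usage map and refcount of any ndlp
 * still on the list before breaking out.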
2650 */ 2651 while (!list_empty(&vport->fc_nodes)) { 2652 if (i++ > 3000) { 2653 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 2654 "0233 Nodelist not empty\n"); 2655 list_for_each_entry_safe(ndlp, next_ndlp, 2656 &vport->fc_nodes, nlp_listp) { 2657 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 2658 LOG_NODE, 2659 "0282 did:x%x ndlp:x%p " 2660 "usgmap:x%x refcnt:%d\n", 2661 ndlp->nlp_DID, (void *)ndlp, 2662 ndlp->nlp_usg_map, 2663 atomic_read( 2664 &ndlp->kref.refcount)); 2665 } 2666 break; 2667 } 2668 2669 /* Wait for any activity on ndlps to settle */ 2670 msleep(10); 2671 } 2672 lpfc_cleanup_vports_rrqs(vport, NULL); 2673 } 2674 2675 /** 2676 * lpfc_stop_vport_timers - Stop all the timers associated with a vport 2677 * @vport: pointer to a virtual N_Port data structure. 2678 * 2679 * This routine stops all the timers associated with a @vport. This function 2680 * is invoked before disabling or deleting a @vport. Note that the physical 2681 * port is treated as @vport 0. 2682 **/ 2683 void 2684 lpfc_stop_vport_timers(struct lpfc_vport *vport) 2685 { 2686 del_timer_sync(&vport->els_tmofunc); 2687 del_timer_sync(&vport->delayed_disc_tmo); 2688 lpfc_can_disctmo(vport); 2689 return; 2690 } 2691 2692 /** 2693 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2694 * @phba: pointer to lpfc hba data structure. 2695 * 2696 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The 2697 * caller of this routine should already hold the host lock. 2698 **/ 2699 void 2700 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2701 { 2702 /* Clear pending FCF rediscovery wait flag */ 2703 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 2704 2705 /* Now, try to stop the timer */ 2706 del_timer(&phba->fcf.redisc_wait); 2707 } 2708 2709 /** 2710 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer 2711 * @phba: pointer to lpfc hba data structure. 2712 * 2713 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It 2714 * checks whether the FCF rediscovery wait timer is pending with the host 2715 * lock held before proceeding with disabling the timer and clearing the 2716 * wait timer pendig flag. 2717 **/ 2718 void 2719 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) 2720 { 2721 spin_lock_irq(&phba->hbalock); 2722 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 2723 /* FCF rediscovery timer already fired or stopped */ 2724 spin_unlock_irq(&phba->hbalock); 2725 return; 2726 } 2727 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); 2728 /* Clear failover in progress flags */ 2729 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC); 2730 spin_unlock_irq(&phba->hbalock); 2731 } 2732 2733 /** 2734 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA 2735 * @phba: pointer to lpfc hba data structure. 2736 * 2737 * This routine stops all the timers associated with a HBA. This function is 2738 * invoked before either putting a HBA offline or unloading the driver. 
2739 **/
2740 void
2741 lpfc_stop_hba_timers(struct lpfc_hba *phba)
2742 {
2743 lpfc_stop_vport_timers(phba->pport);
2744 del_timer_sync(&phba->sli.mbox_tmo);
2745 del_timer_sync(&phba->fabric_block_timer);
2746 del_timer_sync(&phba->eratt_poll);
2747 del_timer_sync(&phba->hb_tmofunc);
2748 if (phba->sli_rev == LPFC_SLI_REV4) {
2749 del_timer_sync(&phba->rrq_tmr);
2750 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
2751 }
2752 phba->hb_outstanding = 0;
2753
2754 switch (phba->pci_dev_grp) {
2755 case LPFC_PCI_DEV_LP:
2756 /* Stop any LightPulse device specific driver timers */
2757 del_timer_sync(&phba->fcp_poll_timer);
2758 break;
2759 case LPFC_PCI_DEV_OC:
2760 /* Stop any OneConnect device specific driver timers */
2761 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2762 break;
2763 default:
2764 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2765 "0297 Invalid device group (x%x)\n",
2766 phba->pci_dev_grp);
2767 break;
2768 }
2769 return;
2770 }
2771
2772 /**
2773 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
2774 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
2775 *
2776 * This routine marks a HBA's management interface as blocked. Once the HBA's
2777 * management interface is marked as blocked, all user space access to
2778 * the HBA, whether through the sysfs interface or the libdfc interface, will
2779 * be blocked. The HBA is set to block the management interface when the
2780 * driver prepares the HBA interface for online or offline.
2781 **/
2782 static void
2783 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
2784 {
2785 unsigned long iflag;
2786 uint8_t actcmd = MBX_HEARTBEAT;
2787 unsigned long timeout;
2788
2789 spin_lock_irqsave(&phba->hbalock, iflag);
2790 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2791 spin_unlock_irqrestore(&phba->hbalock, iflag);
2792 if (mbx_action == LPFC_MBX_NO_WAIT)
2793 return;
2794 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
2795 spin_lock_irqsave(&phba->hbalock, iflag);
2796 if (phba->sli.mbox_active) {
2797 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
2798 /* Determine how long we might wait for the active mailbox
2799 * command to be gracefully completed by firmware.
2800 */
2801 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
2802 phba->sli.mbox_active) * 1000) + jiffies;
2803 }
2804 spin_unlock_irqrestore(&phba->hbalock, iflag);
2805
2806 /* Wait for the outstanding mailbox command to complete */
2807 while (phba->sli.mbox_active) {
2808 /* Check active mailbox complete status every 2ms */
2809 msleep(2);
2810 if (time_after(jiffies, timeout)) {
2811 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2812 "2813 Mgmt IO is Blocked %x "
2813 "- mbox cmd %x still active\n",
2814 phba->sli.sli_flag, actcmd);
2815 break;
2816 }
2817 }
2818 }
2819
2820 /**
2821 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
2822 * @phba: pointer to lpfc hba data structure.
2823 *
2824 * Allocate RPIs for all active remote nodes. This is needed whenever
2825 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
2826 * is to fix up the temporary RPI assignments.
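 *
 * In short: for every vport that is not unloading, each active ndlp is
 * simply handed a fresh RPI from lpfc_sli4_alloc_rpi(); no discovery
 * state machine events are generated here.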
2827 **/ 2828 void 2829 lpfc_sli4_node_prep(struct lpfc_hba *phba) 2830 { 2831 struct lpfc_nodelist *ndlp, *next_ndlp; 2832 struct lpfc_vport **vports; 2833 int i; 2834 2835 if (phba->sli_rev != LPFC_SLI_REV4) 2836 return; 2837 2838 vports = lpfc_create_vport_work_array(phba); 2839 if (vports != NULL) { 2840 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2841 if (vports[i]->load_flag & FC_UNLOADING) 2842 continue; 2843 2844 list_for_each_entry_safe(ndlp, next_ndlp, 2845 &vports[i]->fc_nodes, 2846 nlp_listp) { 2847 if (NLP_CHK_NODE_ACT(ndlp)) { 2848 ndlp->nlp_rpi = 2849 lpfc_sli4_alloc_rpi(phba); 2850 lpfc_printf_vlog(ndlp->vport, KERN_INFO, 2851 LOG_NODE, 2852 "0009 rpi:%x DID:%x " 2853 "flg:%x map:%x %p\n", 2854 ndlp->nlp_rpi, 2855 ndlp->nlp_DID, 2856 ndlp->nlp_flag, 2857 ndlp->nlp_usg_map, 2858 ndlp); 2859 } 2860 } 2861 } 2862 } 2863 lpfc_destroy_vport_work_array(phba, vports); 2864 } 2865 2866 /** 2867 * lpfc_online - Initialize and bring a HBA online 2868 * @phba: pointer to lpfc hba data structure. 2869 * 2870 * This routine initializes the HBA and brings a HBA online. During this 2871 * process, the management interface is blocked to prevent user space access 2872 * to the HBA interfering with the driver initialization. 2873 * 2874 * Return codes 2875 * 0 - successful 2876 * 1 - failed 2877 **/ 2878 int 2879 lpfc_online(struct lpfc_hba *phba) 2880 { 2881 struct lpfc_vport *vport; 2882 struct lpfc_vport **vports; 2883 int i; 2884 bool vpis_cleared = false; 2885 2886 if (!phba) 2887 return 0; 2888 vport = phba->pport; 2889 2890 if (!(vport->fc_flag & FC_OFFLINE_MODE)) 2891 return 0; 2892 2893 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2894 "0458 Bring Adapter online\n"); 2895 2896 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 2897 2898 if (!lpfc_sli_queue_setup(phba)) { 2899 lpfc_unblock_mgmt_io(phba); 2900 return 1; 2901 } 2902 2903 if (phba->sli_rev == LPFC_SLI_REV4) { 2904 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ 2905 lpfc_unblock_mgmt_io(phba); 2906 return 1; 2907 } 2908 spin_lock_irq(&phba->hbalock); 2909 if (!phba->sli4_hba.max_cfg_param.vpi_used) 2910 vpis_cleared = true; 2911 spin_unlock_irq(&phba->hbalock); 2912 } else { 2913 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ 2914 lpfc_unblock_mgmt_io(phba); 2915 return 1; 2916 } 2917 } 2918 2919 vports = lpfc_create_vport_work_array(phba); 2920 if (vports != NULL) { 2921 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2922 struct Scsi_Host *shost; 2923 shost = lpfc_shost_from_vport(vports[i]); 2924 spin_lock_irq(shost->host_lock); 2925 vports[i]->fc_flag &= ~FC_OFFLINE_MODE; 2926 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 2927 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2928 if (phba->sli_rev == LPFC_SLI_REV4) { 2929 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 2930 if ((vpis_cleared) && 2931 (vports[i]->port_type != 2932 LPFC_PHYSICAL_PORT)) 2933 vports[i]->vpi = 0; 2934 } 2935 spin_unlock_irq(shost->host_lock); 2936 } 2937 } 2938 lpfc_destroy_vport_work_array(phba, vports); 2939 2940 lpfc_unblock_mgmt_io(phba); 2941 return 0; 2942 } 2943 2944 /** 2945 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked 2946 * @phba: pointer to lpfc hba data structure. 2947 * 2948 * This routine marks a HBA's management interface as not blocked. Once the 2949 * HBA's management interface is marked as not blocked, all the user space 2950 * access to the HBA, whether they are from sysfs interface or libdfc 2951 * interface will be allowed. 
The HBA is set to block the management interface 2952 * when the driver prepares the HBA interface for online or offline and then 2953 * set to unblock the management interface afterwards. 2954 **/ 2955 void 2956 lpfc_unblock_mgmt_io(struct lpfc_hba * phba) 2957 { 2958 unsigned long iflag; 2959 2960 spin_lock_irqsave(&phba->hbalock, iflag); 2961 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO; 2962 spin_unlock_irqrestore(&phba->hbalock, iflag); 2963 } 2964 2965 /** 2966 * lpfc_offline_prep - Prepare a HBA to be brought offline 2967 * @phba: pointer to lpfc hba data structure. 2968 * 2969 * This routine is invoked to prepare a HBA to be brought offline. It performs 2970 * unregistration login to all the nodes on all vports and flushes the mailbox 2971 * queue to make it ready to be brought offline. 2972 **/ 2973 void 2974 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) 2975 { 2976 struct lpfc_vport *vport = phba->pport; 2977 struct lpfc_nodelist *ndlp, *next_ndlp; 2978 struct lpfc_vport **vports; 2979 struct Scsi_Host *shost; 2980 int i; 2981 2982 if (vport->fc_flag & FC_OFFLINE_MODE) 2983 return; 2984 2985 lpfc_block_mgmt_io(phba, mbx_action); 2986 2987 lpfc_linkdown(phba); 2988 2989 /* Issue an unreg_login to all nodes on all vports */ 2990 vports = lpfc_create_vport_work_array(phba); 2991 if (vports != NULL) { 2992 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2993 if (vports[i]->load_flag & FC_UNLOADING) 2994 continue; 2995 shost = lpfc_shost_from_vport(vports[i]); 2996 spin_lock_irq(shost->host_lock); 2997 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 2998 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 2999 vports[i]->fc_flag &= ~FC_VFI_REGISTERED; 3000 spin_unlock_irq(shost->host_lock); 3001 3002 shost = lpfc_shost_from_vport(vports[i]); 3003 list_for_each_entry_safe(ndlp, next_ndlp, 3004 &vports[i]->fc_nodes, 3005 nlp_listp) { 3006 if (!NLP_CHK_NODE_ACT(ndlp)) 3007 continue; 3008 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 3009 continue; 3010 if (ndlp->nlp_type & NLP_FABRIC) { 3011 lpfc_disc_state_machine(vports[i], ndlp, 3012 NULL, NLP_EVT_DEVICE_RECOVERY); 3013 lpfc_disc_state_machine(vports[i], ndlp, 3014 NULL, NLP_EVT_DEVICE_RM); 3015 } 3016 spin_lock_irq(shost->host_lock); 3017 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 3018 spin_unlock_irq(shost->host_lock); 3019 /* 3020 * Whenever an SLI4 port goes offline, free the 3021 * RPI. Get a new RPI when the adapter port 3022 * comes back online. 3023 */ 3024 if (phba->sli_rev == LPFC_SLI_REV4) { 3025 lpfc_printf_vlog(ndlp->vport, 3026 KERN_INFO, LOG_NODE, 3027 "0011 lpfc_offline: " 3028 "ndlp:x%p did %x " 3029 "usgmap:x%x rpi:%x\n", 3030 ndlp, ndlp->nlp_DID, 3031 ndlp->nlp_usg_map, 3032 ndlp->nlp_rpi); 3033 3034 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 3035 } 3036 lpfc_unreg_rpi(vports[i], ndlp); 3037 } 3038 } 3039 } 3040 lpfc_destroy_vport_work_array(phba, vports); 3041 3042 lpfc_sli_mbox_sys_shutdown(phba, mbx_action); 3043 } 3044 3045 /** 3046 * lpfc_offline - Bring a HBA offline 3047 * @phba: pointer to lpfc hba data structure. 3048 * 3049 * This routine actually brings a HBA offline. It stops all the timers 3050 * associated with the HBA, brings down the SLI layer, and eventually 3051 * marks the HBA as in offline state for the upper layer protocol. 
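 *
 * Callers normally pair this with lpfc_offline_prep() and a later
 * lpfc_online(); a hedged sketch of the recovery sequence (compare
 * lpfc_sli4_port_sta_fn_reset() above, which additionally releases and
 * re-requests the interrupt vectors around the restart):
 *
 *	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
 *	lpfc_offline(phba);
 *	lpfc_sli_brdrestart(phba);
 *	if (lpfc_online(phba) == 0)
 *		lpfc_unblock_mgmt_io(phba);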
3052 **/ 3053 void 3054 lpfc_offline(struct lpfc_hba *phba) 3055 { 3056 struct Scsi_Host *shost; 3057 struct lpfc_vport **vports; 3058 int i; 3059 3060 if (phba->pport->fc_flag & FC_OFFLINE_MODE) 3061 return; 3062 3063 /* stop port and all timers associated with this hba */ 3064 lpfc_stop_port(phba); 3065 vports = lpfc_create_vport_work_array(phba); 3066 if (vports != NULL) 3067 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 3068 lpfc_stop_vport_timers(vports[i]); 3069 lpfc_destroy_vport_work_array(phba, vports); 3070 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3071 "0460 Bring Adapter offline\n"); 3072 /* Bring down the SLI Layer and cleanup. The HBA is offline 3073 now. */ 3074 lpfc_sli_hba_down(phba); 3075 spin_lock_irq(&phba->hbalock); 3076 phba->work_ha = 0; 3077 spin_unlock_irq(&phba->hbalock); 3078 vports = lpfc_create_vport_work_array(phba); 3079 if (vports != NULL) 3080 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3081 shost = lpfc_shost_from_vport(vports[i]); 3082 spin_lock_irq(shost->host_lock); 3083 vports[i]->work_port_events = 0; 3084 vports[i]->fc_flag |= FC_OFFLINE_MODE; 3085 spin_unlock_irq(shost->host_lock); 3086 } 3087 lpfc_destroy_vport_work_array(phba, vports); 3088 } 3089 3090 /** 3091 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists 3092 * @phba: pointer to lpfc hba data structure. 3093 * 3094 * This routine is to free all the SCSI buffers and IOCBs from the driver 3095 * list back to kernel. It is called from lpfc_pci_remove_one to free 3096 * the internal resources before the device is removed from the system. 3097 **/ 3098 static void 3099 lpfc_scsi_free(struct lpfc_hba *phba) 3100 { 3101 struct lpfc_scsi_buf *sb, *sb_next; 3102 struct lpfc_iocbq *io, *io_next; 3103 3104 spin_lock_irq(&phba->hbalock); 3105 3106 /* Release all the lpfc_scsi_bufs maintained by this host. */ 3107 3108 spin_lock(&phba->scsi_buf_list_put_lock); 3109 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, 3110 list) { 3111 list_del(&sb->list); 3112 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 3113 sb->dma_handle); 3114 kfree(sb); 3115 phba->total_scsi_bufs--; 3116 } 3117 spin_unlock(&phba->scsi_buf_list_put_lock); 3118 3119 spin_lock(&phba->scsi_buf_list_get_lock); 3120 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, 3121 list) { 3122 list_del(&sb->list); 3123 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 3124 sb->dma_handle); 3125 kfree(sb); 3126 phba->total_scsi_bufs--; 3127 } 3128 spin_unlock(&phba->scsi_buf_list_get_lock); 3129 3130 /* Release all the lpfc_iocbq entries maintained by this host. */ 3131 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { 3132 list_del(&io->list); 3133 kfree(io); 3134 phba->total_iocbq_bufs--; 3135 } 3136 3137 spin_unlock_irq(&phba->hbalock); 3138 } 3139 3140 /** 3141 * lpfc_sli4_xri_sgl_update - update xri-sgl sizing and mapping 3142 * @phba: pointer to lpfc hba data structure. 3143 * 3144 * This routine first calculates the sizes of the current els and allocated 3145 * scsi sgl lists, and then goes through all sgls to updates the physical 3146 * XRIs assigned due to port function reset. During port initialization, the 3147 * current els and allocated scsi sgl lists are 0s. 
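 *
 * Conceptually (values as computed in the body below):
 *
 *	els_xri_cnt  = lpfc_sli4_get_els_iocb_cnt(phba);
 *	scsi_xri_max = max_cfg_param.max_xri - els_xri_cnt;
 *
 * The ELS sgl list is grown or shrunk to els_xri_cnt, any SCSI buffers
 * beyond scsi_xri_max are freed, and every remaining sgl/buffer is
 * re-stamped with a new XRI from lpfc_sli4_next_xritag().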
3148 * 3149 * Return codes 3150 * 0 - successful (for now, it always returns 0) 3151 **/ 3152 int 3153 lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) 3154 { 3155 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL; 3156 struct lpfc_scsi_buf *psb = NULL, *psb_next = NULL; 3157 uint16_t i, lxri, xri_cnt, els_xri_cnt, scsi_xri_cnt; 3158 LIST_HEAD(els_sgl_list); 3159 LIST_HEAD(scsi_sgl_list); 3160 int rc; 3161 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 3162 3163 /* 3164 * update on pci function's els xri-sgl list 3165 */ 3166 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 3167 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) { 3168 /* els xri-sgl expanded */ 3169 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt; 3170 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3171 "3157 ELS xri-sgl count increased from " 3172 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 3173 els_xri_cnt); 3174 /* allocate the additional els sgls */ 3175 for (i = 0; i < xri_cnt; i++) { 3176 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), 3177 GFP_KERNEL); 3178 if (sglq_entry == NULL) { 3179 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3180 "2562 Failure to allocate an " 3181 "ELS sgl entry:%d\n", i); 3182 rc = -ENOMEM; 3183 goto out_free_mem; 3184 } 3185 sglq_entry->buff_type = GEN_BUFF_TYPE; 3186 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, 3187 &sglq_entry->phys); 3188 if (sglq_entry->virt == NULL) { 3189 kfree(sglq_entry); 3190 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3191 "2563 Failure to allocate an " 3192 "ELS mbuf:%d\n", i); 3193 rc = -ENOMEM; 3194 goto out_free_mem; 3195 } 3196 sglq_entry->sgl = sglq_entry->virt; 3197 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); 3198 sglq_entry->state = SGL_FREED; 3199 list_add_tail(&sglq_entry->list, &els_sgl_list); 3200 } 3201 spin_lock_irq(&phba->hbalock); 3202 spin_lock(&pring->ring_lock); 3203 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list); 3204 spin_unlock(&pring->ring_lock); 3205 spin_unlock_irq(&phba->hbalock); 3206 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { 3207 /* els xri-sgl shrinked */ 3208 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt; 3209 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3210 "3158 ELS xri-sgl count decreased from " 3211 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 3212 els_xri_cnt); 3213 spin_lock_irq(&phba->hbalock); 3214 spin_lock(&pring->ring_lock); 3215 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list); 3216 spin_unlock(&pring->ring_lock); 3217 spin_unlock_irq(&phba->hbalock); 3218 /* release extra els sgls from list */ 3219 for (i = 0; i < xri_cnt; i++) { 3220 list_remove_head(&els_sgl_list, 3221 sglq_entry, struct lpfc_sglq, list); 3222 if (sglq_entry) { 3223 lpfc_mbuf_free(phba, sglq_entry->virt, 3224 sglq_entry->phys); 3225 kfree(sglq_entry); 3226 } 3227 } 3228 spin_lock_irq(&phba->hbalock); 3229 spin_lock(&pring->ring_lock); 3230 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list); 3231 spin_unlock(&pring->ring_lock); 3232 spin_unlock_irq(&phba->hbalock); 3233 } else 3234 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3235 "3163 ELS xri-sgl count unchanged: %d\n", 3236 els_xri_cnt); 3237 phba->sli4_hba.els_xri_cnt = els_xri_cnt; 3238 3239 /* update xris to els sgls on the list */ 3240 sglq_entry = NULL; 3241 sglq_entry_next = NULL; 3242 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 3243 &phba->sli4_hba.lpfc_sgl_list, list) { 3244 lxri = lpfc_sli4_next_xritag(phba); 3245 if (lxri == NO_XRI) { 3246 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3247 "2400 Failed to allocate xri for " 3248 
"ELS sgl\n"); 3249 rc = -ENOMEM; 3250 goto out_free_mem; 3251 } 3252 sglq_entry->sli4_lxritag = lxri; 3253 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3254 } 3255 3256 /* 3257 * update on pci function's allocated scsi xri-sgl list 3258 */ 3259 phba->total_scsi_bufs = 0; 3260 3261 /* maximum number of xris available for scsi buffers */ 3262 phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri - 3263 els_xri_cnt; 3264 3265 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3266 "2401 Current allocated SCSI xri-sgl count:%d, " 3267 "maximum SCSI xri count:%d\n", 3268 phba->sli4_hba.scsi_xri_cnt, 3269 phba->sli4_hba.scsi_xri_max); 3270 3271 spin_lock_irq(&phba->scsi_buf_list_get_lock); 3272 spin_lock(&phba->scsi_buf_list_put_lock); 3273 list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list); 3274 list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list); 3275 spin_unlock(&phba->scsi_buf_list_put_lock); 3276 spin_unlock_irq(&phba->scsi_buf_list_get_lock); 3277 3278 if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) { 3279 /* max scsi xri shrinked below the allocated scsi buffers */ 3280 scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt - 3281 phba->sli4_hba.scsi_xri_max; 3282 /* release the extra allocated scsi buffers */ 3283 for (i = 0; i < scsi_xri_cnt; i++) { 3284 list_remove_head(&scsi_sgl_list, psb, 3285 struct lpfc_scsi_buf, list); 3286 if (psb) { 3287 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, 3288 psb->data, psb->dma_handle); 3289 kfree(psb); 3290 } 3291 } 3292 spin_lock_irq(&phba->scsi_buf_list_get_lock); 3293 phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt; 3294 spin_unlock_irq(&phba->scsi_buf_list_get_lock); 3295 } 3296 3297 /* update xris associated to remaining allocated scsi buffers */ 3298 psb = NULL; 3299 psb_next = NULL; 3300 list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) { 3301 lxri = lpfc_sli4_next_xritag(phba); 3302 if (lxri == NO_XRI) { 3303 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3304 "2560 Failed to allocate xri for " 3305 "scsi buffer\n"); 3306 rc = -ENOMEM; 3307 goto out_free_mem; 3308 } 3309 psb->cur_iocbq.sli4_lxritag = lxri; 3310 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; 3311 } 3312 spin_lock_irq(&phba->scsi_buf_list_get_lock); 3313 spin_lock(&phba->scsi_buf_list_put_lock); 3314 list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get); 3315 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 3316 spin_unlock(&phba->scsi_buf_list_put_lock); 3317 spin_unlock_irq(&phba->scsi_buf_list_get_lock); 3318 3319 return 0; 3320 3321 out_free_mem: 3322 lpfc_free_els_sgl_list(phba); 3323 lpfc_scsi_free(phba); 3324 return rc; 3325 } 3326 3327 /** 3328 * lpfc_create_port - Create an FC port 3329 * @phba: pointer to lpfc hba data structure. 3330 * @instance: a unique integer ID to this FC port. 3331 * @dev: pointer to the device data structure. 3332 * 3333 * This routine creates a FC port for the upper layer protocol. The FC port 3334 * can be created on top of either a physical port or a virtual port provided 3335 * by the HBA. This routine also allocates a SCSI host data structure (shost) 3336 * and associates the FC port created before adding the shost into the SCSI 3337 * layer. 3338 * 3339 * Return codes 3340 * @vport - pointer to the virtual N_Port data structure. 3341 * NULL - port create failed. 
3342 **/ 3343 struct lpfc_vport * 3344 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) 3345 { 3346 struct lpfc_vport *vport; 3347 struct Scsi_Host *shost; 3348 int error = 0; 3349 3350 if (dev != &phba->pcidev->dev) { 3351 shost = scsi_host_alloc(&lpfc_vport_template, 3352 sizeof(struct lpfc_vport)); 3353 } else { 3354 if (phba->sli_rev == LPFC_SLI_REV4) 3355 shost = scsi_host_alloc(&lpfc_template, 3356 sizeof(struct lpfc_vport)); 3357 else 3358 shost = scsi_host_alloc(&lpfc_template_s3, 3359 sizeof(struct lpfc_vport)); 3360 } 3361 if (!shost) 3362 goto out; 3363 3364 vport = (struct lpfc_vport *) shost->hostdata; 3365 vport->phba = phba; 3366 vport->load_flag |= FC_LOADING; 3367 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3368 vport->fc_rscn_flush = 0; 3369 3370 lpfc_get_vport_cfgparam(vport); 3371 shost->unique_id = instance; 3372 shost->max_id = LPFC_MAX_TARGET; 3373 shost->max_lun = vport->cfg_max_luns; 3374 shost->this_id = -1; 3375 shost->max_cmd_len = 16; 3376 shost->nr_hw_queues = phba->cfg_fcp_io_channel; 3377 if (phba->sli_rev == LPFC_SLI_REV4) { 3378 shost->dma_boundary = 3379 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 3380 shost->sg_tablesize = phba->cfg_sg_seg_cnt; 3381 } 3382 3383 /* 3384 * Set initial can_queue value since 0 is no longer supported and 3385 * scsi_add_host will fail. This will be adjusted later based on the 3386 * max xri value determined in hba setup. 3387 */ 3388 shost->can_queue = phba->cfg_hba_queue_depth - 10; 3389 if (dev != &phba->pcidev->dev) { 3390 shost->transportt = lpfc_vport_transport_template; 3391 vport->port_type = LPFC_NPIV_PORT; 3392 } else { 3393 shost->transportt = lpfc_transport_template; 3394 vport->port_type = LPFC_PHYSICAL_PORT; 3395 } 3396 3397 /* Initialize all internally managed lists. */ 3398 INIT_LIST_HEAD(&vport->fc_nodes); 3399 INIT_LIST_HEAD(&vport->rcv_buffer_list); 3400 spin_lock_init(&vport->work_port_lock); 3401 3402 init_timer(&vport->fc_disctmo); 3403 vport->fc_disctmo.function = lpfc_disc_timeout; 3404 vport->fc_disctmo.data = (unsigned long)vport; 3405 3406 init_timer(&vport->els_tmofunc); 3407 vport->els_tmofunc.function = lpfc_els_timeout; 3408 vport->els_tmofunc.data = (unsigned long)vport; 3409 3410 init_timer(&vport->delayed_disc_tmo); 3411 vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo; 3412 vport->delayed_disc_tmo.data = (unsigned long)vport; 3413 3414 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); 3415 if (error) 3416 goto out_put_shost; 3417 3418 spin_lock_irq(&phba->hbalock); 3419 list_add_tail(&vport->listentry, &phba->port_list); 3420 spin_unlock_irq(&phba->hbalock); 3421 return vport; 3422 3423 out_put_shost: 3424 scsi_host_put(shost); 3425 out: 3426 return NULL; 3427 } 3428 3429 /** 3430 * destroy_port - destroy an FC port 3431 * @vport: pointer to an lpfc virtual N_Port data structure. 3432 * 3433 * This routine destroys a FC port from the upper layer protocol. All the 3434 * resources associated with the port are released. 
3435 **/ 3436 void 3437 destroy_port(struct lpfc_vport *vport) 3438 { 3439 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3440 struct lpfc_hba *phba = vport->phba; 3441 3442 lpfc_debugfs_terminate(vport); 3443 fc_remove_host(shost); 3444 scsi_remove_host(shost); 3445 3446 spin_lock_irq(&phba->hbalock); 3447 list_del_init(&vport->listentry); 3448 spin_unlock_irq(&phba->hbalock); 3449 3450 lpfc_cleanup(vport); 3451 return; 3452 } 3453 3454 /** 3455 * lpfc_get_instance - Get a unique integer ID 3456 * 3457 * This routine allocates a unique integer ID from lpfc_hba_index pool. It 3458 * uses the kernel idr facility to perform the task. 3459 * 3460 * Return codes: 3461 * instance - a unique integer ID allocated as the new instance. 3462 * -1 - lpfc get instance failed. 3463 **/ 3464 int 3465 lpfc_get_instance(void) 3466 { 3467 int ret; 3468 3469 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL); 3470 return ret < 0 ? -1 : ret; 3471 } 3472 3473 /** 3474 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done 3475 * @shost: pointer to SCSI host data structure. 3476 * @time: elapsed time of the scan in jiffies. 3477 * 3478 * This routine is called by the SCSI layer with a SCSI host to determine 3479 * whether the scan host is finished. 3480 * 3481 * Note: there is no scan_start function as adapter initialization will have 3482 * asynchronously kicked off the link initialization. 3483 * 3484 * Return codes 3485 * 0 - SCSI host scan is not over yet. 3486 * 1 - SCSI host scan is over. 3487 **/ 3488 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 3489 { 3490 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3491 struct lpfc_hba *phba = vport->phba; 3492 int stat = 0; 3493 3494 spin_lock_irq(shost->host_lock); 3495 3496 if (vport->load_flag & FC_UNLOADING) { 3497 stat = 1; 3498 goto finished; 3499 } 3500 if (time >= msecs_to_jiffies(30 * 1000)) { 3501 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3502 "0461 Scanning longer than 30 " 3503 "seconds. Continuing initialization\n"); 3504 stat = 1; 3505 goto finished; 3506 } 3507 if (time >= msecs_to_jiffies(15 * 1000) && 3508 phba->link_state <= LPFC_LINK_DOWN) { 3509 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3510 "0465 Link down longer than 15 " 3511 "seconds. Continuing initialization\n"); 3512 stat = 1; 3513 goto finished; 3514 } 3515 3516 if (vport->port_state != LPFC_VPORT_READY) 3517 goto finished; 3518 if (vport->num_disc_nodes || vport->fc_prli_sent) 3519 goto finished; 3520 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000)) 3521 goto finished; 3522 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) 3523 goto finished; 3524 3525 stat = 1; 3526 3527 finished: 3528 spin_unlock_irq(shost->host_lock); 3529 return stat; 3530 } 3531 3532 /** 3533 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port 3534 * @shost: pointer to SCSI host data structure. 3535 * 3536 * This routine initializes a given SCSI host attributes on a FC port. The 3537 * SCSI host can be either on top of a physical port or a virtual port. 3538 **/ 3539 void lpfc_host_attrib_init(struct Scsi_Host *shost) 3540 { 3541 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3542 struct lpfc_hba *phba = vport->phba; 3543 /* 3544 * Set fixed host attributes. Must done after lpfc_sli_hba_setup(). 
3545 */ 3546 3547 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 3548 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 3549 fc_host_supported_classes(shost) = FC_COS_CLASS3; 3550 3551 memset(fc_host_supported_fc4s(shost), 0, 3552 sizeof(fc_host_supported_fc4s(shost))); 3553 fc_host_supported_fc4s(shost)[2] = 1; 3554 fc_host_supported_fc4s(shost)[7] = 1; 3555 3556 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost), 3557 sizeof fc_host_symbolic_name(shost)); 3558 3559 fc_host_supported_speeds(shost) = 0; 3560 if (phba->lmt & LMT_32Gb) 3561 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT; 3562 if (phba->lmt & LMT_16Gb) 3563 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT; 3564 if (phba->lmt & LMT_10Gb) 3565 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT; 3566 if (phba->lmt & LMT_8Gb) 3567 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT; 3568 if (phba->lmt & LMT_4Gb) 3569 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT; 3570 if (phba->lmt & LMT_2Gb) 3571 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT; 3572 if (phba->lmt & LMT_1Gb) 3573 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 3574 3575 fc_host_maxframe_size(shost) = 3576 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 3577 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; 3578 3579 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; 3580 3581 /* This value is also unchanging */ 3582 memset(fc_host_active_fc4s(shost), 0, 3583 sizeof(fc_host_active_fc4s(shost))); 3584 fc_host_active_fc4s(shost)[2] = 1; 3585 fc_host_active_fc4s(shost)[7] = 1; 3586 3587 fc_host_max_npiv_vports(shost) = phba->max_vpi; 3588 spin_lock_irq(shost->host_lock); 3589 vport->load_flag &= ~FC_LOADING; 3590 spin_unlock_irq(shost->host_lock); 3591 } 3592 3593 /** 3594 * lpfc_stop_port_s3 - Stop SLI3 device port 3595 * @phba: pointer to lpfc hba data structure. 3596 * 3597 * This routine is invoked to stop an SLI3 device port, it stops the device 3598 * from generating interrupts and stops the device driver's timers for the 3599 * device. 3600 **/ 3601 static void 3602 lpfc_stop_port_s3(struct lpfc_hba *phba) 3603 { 3604 /* Clear all interrupt enable conditions */ 3605 writel(0, phba->HCregaddr); 3606 readl(phba->HCregaddr); /* flush */ 3607 /* Clear all pending interrupts */ 3608 writel(0xffffffff, phba->HAregaddr); 3609 readl(phba->HAregaddr); /* flush */ 3610 3611 /* Reset some HBA SLI setup states */ 3612 lpfc_stop_hba_timers(phba); 3613 phba->pport->work_port_events = 0; 3614 } 3615 3616 /** 3617 * lpfc_stop_port_s4 - Stop SLI4 device port 3618 * @phba: pointer to lpfc hba data structure. 3619 * 3620 * This routine is invoked to stop an SLI4 device port, it stops the device 3621 * from generating interrupts and stops the device driver's timers for the 3622 * device. 3623 **/ 3624 static void 3625 lpfc_stop_port_s4(struct lpfc_hba *phba) 3626 { 3627 /* Reset some HBA SLI4 setup states */ 3628 lpfc_stop_hba_timers(phba); 3629 phba->pport->work_port_events = 0; 3630 phba->sli4_hba.intr_enable = 0; 3631 } 3632 3633 /** 3634 * lpfc_stop_port - Wrapper function for stopping hba port 3635 * @phba: Pointer to HBA context object. 3636 * 3637 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from 3638 * the API jump table function pointer from the lpfc_hba struct. 
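 *
 * A minimal sketch of the jump-table idiom (the actual assignment is made
 * during driver API setup elsewhere and is only assumed here; the
 * LPFC_PCI_DEV_* values are the ones used in lpfc_stop_hba_timers above):
 *
 *	phba->lpfc_stop_port = (phba->pci_dev_grp == LPFC_PCI_DEV_OC) ?
 *				lpfc_stop_port_s4 : lpfc_stop_port_s3;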
3639 **/ 3640 void 3641 lpfc_stop_port(struct lpfc_hba *phba) 3642 { 3643 phba->lpfc_stop_port(phba); 3644 } 3645 3646 /** 3647 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer 3648 * @phba: Pointer to hba for which this call is being executed. 3649 * 3650 * This routine starts the timer waiting for the FCF rediscovery to complete. 3651 **/ 3652 void 3653 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba) 3654 { 3655 unsigned long fcf_redisc_wait_tmo = 3656 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO)); 3657 /* Start fcf rediscovery wait period timer */ 3658 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo); 3659 spin_lock_irq(&phba->hbalock); 3660 /* Allow action to new fcf asynchronous event */ 3661 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); 3662 /* Mark the FCF rediscovery pending state */ 3663 phba->fcf.fcf_flag |= FCF_REDISC_PEND; 3664 spin_unlock_irq(&phba->hbalock); 3665 } 3666 3667 /** 3668 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout 3669 * @ptr: Map to lpfc_hba data structure pointer. 3670 * 3671 * This routine is invoked when waiting for FCF table rediscover has been 3672 * timed out. If new FCF record(s) has (have) been discovered during the 3673 * wait period, a new FCF event shall be added to the FCOE async event 3674 * list, and then worker thread shall be waked up for processing from the 3675 * worker thread context. 3676 **/ 3677 static void 3678 lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr) 3679 { 3680 struct lpfc_hba *phba = (struct lpfc_hba *)ptr; 3681 3682 /* Don't send FCF rediscovery event if timer cancelled */ 3683 spin_lock_irq(&phba->hbalock); 3684 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) { 3685 spin_unlock_irq(&phba->hbalock); 3686 return; 3687 } 3688 /* Clear FCF rediscovery timer pending flag */ 3689 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; 3690 /* FCF rediscovery event to worker thread */ 3691 phba->fcf.fcf_flag |= FCF_REDISC_EVT; 3692 spin_unlock_irq(&phba->hbalock); 3693 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 3694 "2776 FCF rediscover quiescent timer expired\n"); 3695 /* wake up worker thread */ 3696 lpfc_worker_wake_up(phba); 3697 } 3698 3699 /** 3700 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code 3701 * @phba: pointer to lpfc hba data structure. 3702 * @acqe_link: pointer to the async link completion queue entry. 3703 * 3704 * This routine is to parse the SLI4 link-attention link fault code and 3705 * translate it into the base driver's read link attention mailbox command 3706 * status. 3707 * 3708 * Return: Link-attention status in terms of base driver's coding. 3709 **/ 3710 static uint16_t 3711 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, 3712 struct lpfc_acqe_link *acqe_link) 3713 { 3714 uint16_t latt_fault; 3715 3716 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { 3717 case LPFC_ASYNC_LINK_FAULT_NONE: 3718 case LPFC_ASYNC_LINK_FAULT_LOCAL: 3719 case LPFC_ASYNC_LINK_FAULT_REMOTE: 3720 latt_fault = 0; 3721 break; 3722 default: 3723 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3724 "0398 Invalid link fault code: x%x\n", 3725 bf_get(lpfc_acqe_link_fault, acqe_link)); 3726 latt_fault = MBXERR_ERROR; 3727 break; 3728 } 3729 return latt_fault; 3730 } 3731 3732 /** 3733 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type 3734 * @phba: pointer to lpfc hba data structure. 3735 * @acqe_link: pointer to the async link completion queue entry. 
3736 * 3737 * This routine is to parse the SLI4 link attention type and translate it 3738 * into the base driver's link attention type coding. 3739 * 3740 * Return: Link attention type in terms of base driver's coding. 3741 **/ 3742 static uint8_t 3743 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, 3744 struct lpfc_acqe_link *acqe_link) 3745 { 3746 uint8_t att_type; 3747 3748 switch (bf_get(lpfc_acqe_link_status, acqe_link)) { 3749 case LPFC_ASYNC_LINK_STATUS_DOWN: 3750 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: 3751 att_type = LPFC_ATT_LINK_DOWN; 3752 break; 3753 case LPFC_ASYNC_LINK_STATUS_UP: 3754 /* Ignore physical link up events - wait for logical link up */ 3755 att_type = LPFC_ATT_RESERVED; 3756 break; 3757 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: 3758 att_type = LPFC_ATT_LINK_UP; 3759 break; 3760 default: 3761 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3762 "0399 Invalid link attention type: x%x\n", 3763 bf_get(lpfc_acqe_link_status, acqe_link)); 3764 att_type = LPFC_ATT_RESERVED; 3765 break; 3766 } 3767 return att_type; 3768 } 3769 3770 /** 3771 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed 3772 * @phba: pointer to lpfc hba data structure. 3773 * 3774 * This routine is to get an SLI3 FC port's link speed in Mbps. 3775 * 3776 * Return: link speed in terms of Mbps. 3777 **/ 3778 uint32_t 3779 lpfc_sli_port_speed_get(struct lpfc_hba *phba) 3780 { 3781 uint32_t link_speed; 3782 3783 if (!lpfc_is_link_up(phba)) 3784 return 0; 3785 3786 if (phba->sli_rev <= LPFC_SLI_REV3) { 3787 switch (phba->fc_linkspeed) { 3788 case LPFC_LINK_SPEED_1GHZ: 3789 link_speed = 1000; 3790 break; 3791 case LPFC_LINK_SPEED_2GHZ: 3792 link_speed = 2000; 3793 break; 3794 case LPFC_LINK_SPEED_4GHZ: 3795 link_speed = 4000; 3796 break; 3797 case LPFC_LINK_SPEED_8GHZ: 3798 link_speed = 8000; 3799 break; 3800 case LPFC_LINK_SPEED_10GHZ: 3801 link_speed = 10000; 3802 break; 3803 case LPFC_LINK_SPEED_16GHZ: 3804 link_speed = 16000; 3805 break; 3806 default: 3807 link_speed = 0; 3808 } 3809 } else { 3810 if (phba->sli4_hba.link_state.logical_speed) 3811 link_speed = 3812 phba->sli4_hba.link_state.logical_speed; 3813 else 3814 link_speed = phba->sli4_hba.link_state.speed; 3815 } 3816 return link_speed; 3817 } 3818 3819 /** 3820 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed 3821 * @phba: pointer to lpfc hba data structure. 3822 * @evt_code: asynchronous event code. 3823 * @speed_code: asynchronous event link speed code. 3824 * 3825 * This routine is to parse the giving SLI4 async event link speed code into 3826 * value of Mbps for the link speed. 3827 * 3828 * Return: link speed in terms of Mbps. 
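* Note: LPFC_TRAILER_CODE_LINK speed codes describe an FCoE/Ethernet link (10 Mbps through 40 Gbps), while LPFC_TRAILER_CODE_FC codes describe native FC rates (1G through 32G); any unrecognized code is reported as 0 Mbps.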
3829 **/ 3830 static uint32_t 3831 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, 3832 uint8_t speed_code) 3833 { 3834 uint32_t port_speed; 3835 3836 switch (evt_code) { 3837 case LPFC_TRAILER_CODE_LINK: 3838 switch (speed_code) { 3839 case LPFC_ASYNC_LINK_SPEED_ZERO: 3840 port_speed = 0; 3841 break; 3842 case LPFC_ASYNC_LINK_SPEED_10MBPS: 3843 port_speed = 10; 3844 break; 3845 case LPFC_ASYNC_LINK_SPEED_100MBPS: 3846 port_speed = 100; 3847 break; 3848 case LPFC_ASYNC_LINK_SPEED_1GBPS: 3849 port_speed = 1000; 3850 break; 3851 case LPFC_ASYNC_LINK_SPEED_10GBPS: 3852 port_speed = 10000; 3853 break; 3854 case LPFC_ASYNC_LINK_SPEED_20GBPS: 3855 port_speed = 20000; 3856 break; 3857 case LPFC_ASYNC_LINK_SPEED_25GBPS: 3858 port_speed = 25000; 3859 break; 3860 case LPFC_ASYNC_LINK_SPEED_40GBPS: 3861 port_speed = 40000; 3862 break; 3863 default: 3864 port_speed = 0; 3865 } 3866 break; 3867 case LPFC_TRAILER_CODE_FC: 3868 switch (speed_code) { 3869 case LPFC_FC_LA_SPEED_UNKNOWN: 3870 port_speed = 0; 3871 break; 3872 case LPFC_FC_LA_SPEED_1G: 3873 port_speed = 1000; 3874 break; 3875 case LPFC_FC_LA_SPEED_2G: 3876 port_speed = 2000; 3877 break; 3878 case LPFC_FC_LA_SPEED_4G: 3879 port_speed = 4000; 3880 break; 3881 case LPFC_FC_LA_SPEED_8G: 3882 port_speed = 8000; 3883 break; 3884 case LPFC_FC_LA_SPEED_10G: 3885 port_speed = 10000; 3886 break; 3887 case LPFC_FC_LA_SPEED_16G: 3888 port_speed = 16000; 3889 break; 3890 case LPFC_FC_LA_SPEED_32G: 3891 port_speed = 32000; 3892 break; 3893 default: 3894 port_speed = 0; 3895 } 3896 break; 3897 default: 3898 port_speed = 0; 3899 } 3900 return port_speed; 3901 } 3902 3903 /** 3904 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event 3905 * @phba: pointer to lpfc hba data structure. 3906 * @acqe_link: pointer to the async link completion queue entry. 3907 * 3908 * This routine is to handle the SLI4 asynchronous FCoE link event. 
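* For native FC ports the routine issues a real READ_TOPOLOGY mailbox command; for FCoE ports it fills in the READ_TOPOLOGY fields from the ACQE and invokes the completion handler directly, so the rest of the driver follows the same path as SLI3.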
3909 **/ 3910 static void 3911 lpfc_sli4_async_link_evt(struct lpfc_hba *phba, 3912 struct lpfc_acqe_link *acqe_link) 3913 { 3914 struct lpfc_dmabuf *mp; 3915 LPFC_MBOXQ_t *pmb; 3916 MAILBOX_t *mb; 3917 struct lpfc_mbx_read_top *la; 3918 uint8_t att_type; 3919 int rc; 3920 3921 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); 3922 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP) 3923 return; 3924 phba->fcoe_eventtag = acqe_link->event_tag; 3925 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3926 if (!pmb) { 3927 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3928 "0395 The mboxq allocation failed\n"); 3929 return; 3930 } 3931 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 3932 if (!mp) { 3933 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3934 "0396 The lpfc_dmabuf allocation failed\n"); 3935 goto out_free_pmb; 3936 } 3937 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 3938 if (!mp->virt) { 3939 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3940 "0397 The mbuf allocation failed\n"); 3941 goto out_free_dmabuf; 3942 } 3943 3944 /* Cleanup any outstanding ELS commands */ 3945 lpfc_els_flush_all_cmd(phba); 3946 3947 /* Block ELS IOCBs until we have done process link event */ 3948 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 3949 3950 /* Update link event statistics */ 3951 phba->sli.slistat.link_event++; 3952 3953 /* Create lpfc_handle_latt mailbox command from link ACQE */ 3954 lpfc_read_topology(phba, pmb, mp); 3955 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 3956 pmb->vport = phba->pport; 3957 3958 /* Keep the link status for extra SLI4 state machine reference */ 3959 phba->sli4_hba.link_state.speed = 3960 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK, 3961 bf_get(lpfc_acqe_link_speed, acqe_link)); 3962 phba->sli4_hba.link_state.duplex = 3963 bf_get(lpfc_acqe_link_duplex, acqe_link); 3964 phba->sli4_hba.link_state.status = 3965 bf_get(lpfc_acqe_link_status, acqe_link); 3966 phba->sli4_hba.link_state.type = 3967 bf_get(lpfc_acqe_link_type, acqe_link); 3968 phba->sli4_hba.link_state.number = 3969 bf_get(lpfc_acqe_link_number, acqe_link); 3970 phba->sli4_hba.link_state.fault = 3971 bf_get(lpfc_acqe_link_fault, acqe_link); 3972 phba->sli4_hba.link_state.logical_speed = 3973 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10; 3974 3975 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3976 "2900 Async FC/FCoE Link event - Speed:%dGBit " 3977 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d " 3978 "Logical speed:%dMbps Fault:%d\n", 3979 phba->sli4_hba.link_state.speed, 3980 phba->sli4_hba.link_state.topology, 3981 phba->sli4_hba.link_state.status, 3982 phba->sli4_hba.link_state.type, 3983 phba->sli4_hba.link_state.number, 3984 phba->sli4_hba.link_state.logical_speed, 3985 phba->sli4_hba.link_state.fault); 3986 /* 3987 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch 3988 * topology info. Note: Optional for non FC-AL ports. 3989 */ 3990 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 3991 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 3992 if (rc == MBX_NOT_FINISHED) 3993 goto out_free_dmabuf; 3994 return; 3995 } 3996 /* 3997 * For FCoE Mode: fill in all the topology information we need and call 3998 * the READ_TOPOLOGY completion routine to continue without actually 3999 * sending the READ_TOPOLOGY mailbox command to the port. 
4000 */ 4001 /* Parse and translate status field */ 4002 mb = &pmb->u.mb; 4003 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link); 4004 4005 /* Parse and translate link attention fields */ 4006 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; 4007 la->eventTag = acqe_link->event_tag; 4008 bf_set(lpfc_mbx_read_top_att_type, la, att_type); 4009 bf_set(lpfc_mbx_read_top_link_spd, la, 4010 (bf_get(lpfc_acqe_link_speed, acqe_link))); 4011 4012 /* Fake the the following irrelvant fields */ 4013 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT); 4014 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0); 4015 bf_set(lpfc_mbx_read_top_il, la, 0); 4016 bf_set(lpfc_mbx_read_top_pb, la, 0); 4017 bf_set(lpfc_mbx_read_top_fa, la, 0); 4018 bf_set(lpfc_mbx_read_top_mm, la, 0); 4019 4020 /* Invoke the lpfc_handle_latt mailbox command callback function */ 4021 lpfc_mbx_cmpl_read_topology(phba, pmb); 4022 4023 return; 4024 4025 out_free_dmabuf: 4026 kfree(mp); 4027 out_free_pmb: 4028 mempool_free(pmb, phba->mbox_mem_pool); 4029 } 4030 4031 /** 4032 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event 4033 * @phba: pointer to lpfc hba data structure. 4034 * @acqe_fc: pointer to the async fc completion queue entry. 4035 * 4036 * This routine is to handle the SLI4 asynchronous FC event. It will simply log 4037 * that the event was received and then issue a read_topology mailbox command so 4038 * that the rest of the driver will treat it the same as SLI3. 4039 **/ 4040 static void 4041 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) 4042 { 4043 struct lpfc_dmabuf *mp; 4044 LPFC_MBOXQ_t *pmb; 4045 MAILBOX_t *mb; 4046 struct lpfc_mbx_read_top *la; 4047 int rc; 4048 4049 if (bf_get(lpfc_trailer_type, acqe_fc) != 4050 LPFC_FC_LA_EVENT_TYPE_FC_LINK) { 4051 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4052 "2895 Non FC link Event detected.(%d)\n", 4053 bf_get(lpfc_trailer_type, acqe_fc)); 4054 return; 4055 } 4056 /* Keep the link status for extra SLI4 state machine reference */ 4057 phba->sli4_hba.link_state.speed = 4058 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC, 4059 bf_get(lpfc_acqe_fc_la_speed, acqe_fc)); 4060 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; 4061 phba->sli4_hba.link_state.topology = 4062 bf_get(lpfc_acqe_fc_la_topology, acqe_fc); 4063 phba->sli4_hba.link_state.status = 4064 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc); 4065 phba->sli4_hba.link_state.type = 4066 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc); 4067 phba->sli4_hba.link_state.number = 4068 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc); 4069 phba->sli4_hba.link_state.fault = 4070 bf_get(lpfc_acqe_link_fault, acqe_fc); 4071 phba->sli4_hba.link_state.logical_speed = 4072 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10; 4073 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4074 "2896 Async FC event - Speed:%dGBaud Topology:x%x " 4075 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" 4076 "%dMbps Fault:%d\n", 4077 phba->sli4_hba.link_state.speed, 4078 phba->sli4_hba.link_state.topology, 4079 phba->sli4_hba.link_state.status, 4080 phba->sli4_hba.link_state.type, 4081 phba->sli4_hba.link_state.number, 4082 phba->sli4_hba.link_state.logical_speed, 4083 phba->sli4_hba.link_state.fault); 4084 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4085 if (!pmb) { 4086 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4087 "2897 The mboxq allocation failed\n"); 4088 return; 4089 } 4090 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4091 if (!mp) { 4092 
lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4093 "2898 The lpfc_dmabuf allocation failed\n"); 4094 goto out_free_pmb; 4095 } 4096 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 4097 if (!mp->virt) { 4098 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4099 "2899 The mbuf allocation failed\n"); 4100 goto out_free_dmabuf; 4101 } 4102 4103 /* Cleanup any outstanding ELS commands */ 4104 lpfc_els_flush_all_cmd(phba); 4105 4106 /* Block ELS IOCBs until we are done processing the link event */ 4107 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; 4108 4109 /* Update link event statistics */ 4110 phba->sli.slistat.link_event++; 4111 4112 /* Create lpfc_handle_latt mailbox command from link ACQE */ 4113 lpfc_read_topology(phba, pmb, mp); 4114 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; 4115 pmb->vport = phba->pport; 4116 4117 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) { 4118 /* Parse and translate status field */ 4119 mb = &pmb->u.mb; 4120 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, 4121 (void *)acqe_fc); 4122 4123 /* Parse and translate link attention fields */ 4124 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop; 4125 la->eventTag = acqe_fc->event_tag; 4126 bf_set(lpfc_mbx_read_top_att_type, la, 4127 LPFC_FC_LA_TYPE_LINK_DOWN); 4128 4129 /* Invoke the mailbox command callback function */ 4130 lpfc_mbx_cmpl_read_topology(phba, pmb); 4131 4132 return; 4133 } 4134 4135 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 4136 if (rc == MBX_NOT_FINISHED) 4137 goto out_free_dmabuf; 4138 return; 4139 4140 out_free_dmabuf: 4141 kfree(mp); 4142 out_free_pmb: 4143 mempool_free(pmb, phba->mbox_mem_pool); 4144 } 4145 4146 /** 4147 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event 4148 * @phba: pointer to lpfc hba data structure. 4149 * @acqe_sli: pointer to the async SLI completion queue entry. 4150 * 4151 * This routine is to handle the SLI4 asynchronous SLI events.
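* Events handled here include over/normal temperature notifications, misconfigured-optics reports (per link number), and remote D_Port test initiation; any other SLI event type is simply logged.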
4152 **/ 4153 static void 4154 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) 4155 { 4156 char port_name; 4157 char message[128]; 4158 uint8_t status; 4159 uint8_t evt_type; 4160 uint8_t operational = 0; 4161 struct temp_event temp_event_data; 4162 struct lpfc_acqe_misconfigured_event *misconfigured; 4163 struct Scsi_Host *shost; 4164 4165 evt_type = bf_get(lpfc_trailer_type, acqe_sli); 4166 4167 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4168 "2901 Async SLI event - Event Data1:x%08x Event Data2:" 4169 "x%08x SLI Event Type:%d\n", 4170 acqe_sli->event_data1, acqe_sli->event_data2, 4171 evt_type); 4172 4173 port_name = phba->Port[0]; 4174 if (port_name == 0x00) 4175 port_name = '?'; /* get port name is empty */ 4176 4177 switch (evt_type) { 4178 case LPFC_SLI_EVENT_TYPE_OVER_TEMP: 4179 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 4180 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 4181 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 4182 4183 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 4184 "3190 Over Temperature:%d Celsius- Port Name %c\n", 4185 acqe_sli->event_data1, port_name); 4186 4187 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE; 4188 shost = lpfc_shost_from_vport(phba->pport); 4189 fc_host_post_vendor_event(shost, fc_get_event_number(), 4190 sizeof(temp_event_data), 4191 (char *)&temp_event_data, 4192 SCSI_NL_VID_TYPE_PCI 4193 | PCI_VENDOR_ID_EMULEX); 4194 break; 4195 case LPFC_SLI_EVENT_TYPE_NORM_TEMP: 4196 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 4197 temp_event_data.event_code = LPFC_NORMAL_TEMP; 4198 temp_event_data.data = (uint32_t)acqe_sli->event_data1; 4199 4200 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4201 "3191 Normal Temperature:%d Celsius - Port Name %c\n", 4202 acqe_sli->event_data1, port_name); 4203 4204 shost = lpfc_shost_from_vport(phba->pport); 4205 fc_host_post_vendor_event(shost, fc_get_event_number(), 4206 sizeof(temp_event_data), 4207 (char *)&temp_event_data, 4208 SCSI_NL_VID_TYPE_PCI 4209 | PCI_VENDOR_ID_EMULEX); 4210 break; 4211 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED: 4212 misconfigured = (struct lpfc_acqe_misconfigured_event *) 4213 &acqe_sli->event_data1; 4214 4215 /* fetch the status for this port */ 4216 switch (phba->sli4_hba.lnk_info.lnk_no) { 4217 case LPFC_LINK_NUMBER_0: 4218 status = bf_get(lpfc_sli_misconfigured_port0_state, 4219 &misconfigured->theEvent); 4220 operational = bf_get(lpfc_sli_misconfigured_port0_op, 4221 &misconfigured->theEvent); 4222 break; 4223 case LPFC_LINK_NUMBER_1: 4224 status = bf_get(lpfc_sli_misconfigured_port1_state, 4225 &misconfigured->theEvent); 4226 operational = bf_get(lpfc_sli_misconfigured_port1_op, 4227 &misconfigured->theEvent); 4228 break; 4229 case LPFC_LINK_NUMBER_2: 4230 status = bf_get(lpfc_sli_misconfigured_port2_state, 4231 &misconfigured->theEvent); 4232 operational = bf_get(lpfc_sli_misconfigured_port2_op, 4233 &misconfigured->theEvent); 4234 break; 4235 case LPFC_LINK_NUMBER_3: 4236 status = bf_get(lpfc_sli_misconfigured_port3_state, 4237 &misconfigured->theEvent); 4238 operational = bf_get(lpfc_sli_misconfigured_port3_op, 4239 &misconfigured->theEvent); 4240 break; 4241 default: 4242 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4243 "3296 " 4244 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED " 4245 "event: Invalid link %d", 4246 phba->sli4_hba.lnk_info.lnk_no); 4247 return; 4248 } 4249 4250 /* Skip if optic state unchanged */ 4251 if (phba->sli4_hba.lnk_info.optic_state == status) 4252 return; 4253 4254 switch (status) { 4255 case 
LPFC_SLI_EVENT_STATUS_VALID: 4256 sprintf(message, "Physical Link is functional"); 4257 break; 4258 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT: 4259 sprintf(message, "Optics faulted/incorrectly " 4260 "installed/not installed - Reseat optics, " 4261 "if issue not resolved, replace."); 4262 break; 4263 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE: 4264 sprintf(message, 4265 "Optics of two types installed - Remove one " 4266 "optic or install matching pair of optics."); 4267 break; 4268 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED: 4269 sprintf(message, "Incompatible optics - Replace with " 4270 "compatible optics for card to function."); 4271 break; 4272 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED: 4273 sprintf(message, "Unqualified optics - Replace with " 4274 "Avago optics for Warranty and Technical " 4275 "Support - Link is%s operational", 4276 (operational) ? "" : " not"); 4277 break; 4278 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED: 4279 sprintf(message, "Uncertified optics - Replace with " 4280 "Avago-certified optics to enable link " 4281 "operation - Link is%s operational", 4282 (operational) ? "" : " not"); 4283 break; 4284 default: 4285 /* firmware is reporting a status we don't know about */ 4286 sprintf(message, "Unknown event status x%02x", status); 4287 break; 4288 } 4289 phba->sli4_hba.lnk_info.optic_state = status; 4290 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4291 "3176 Port Name %c %s\n", port_name, message); 4292 break; 4293 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT: 4294 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4295 "3192 Remote DPort Test Initiated - " 4296 "Event Data1:x%08x Event Data2: x%08x\n", 4297 acqe_sli->event_data1, acqe_sli->event_data2); 4298 break; 4299 default: 4300 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4301 "3193 Async SLI event - Event Data1:x%08x Event Data2:" 4302 "x%08x SLI Event Type:%d\n", 4303 acqe_sli->event_data1, acqe_sli->event_data2, 4304 evt_type); 4305 break; 4306 } 4307 } 4308 4309 /** 4310 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport 4311 * @vport: pointer to vport data structure. 4312 * 4313 * This routine is to perform Clear Virtual Link (CVL) on a vport in 4314 * response to a CVL event. 4315 * 4316 * Return the pointer to the ndlp with the vport if successful, otherwise 4317 * return NULL. 
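* The CVL is acted on by taking the port link down, cleaning up any pending mailbox commands for the vport, and setting FC_VPORT_CVL_RCVD so later discovery (typically an FDISC) can re-instantiate the virtual link.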
4318 **/ 4319 static struct lpfc_nodelist * 4320 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) 4321 { 4322 struct lpfc_nodelist *ndlp; 4323 struct Scsi_Host *shost; 4324 struct lpfc_hba *phba; 4325 4326 if (!vport) 4327 return NULL; 4328 phba = vport->phba; 4329 if (!phba) 4330 return NULL; 4331 ndlp = lpfc_findnode_did(vport, Fabric_DID); 4332 if (!ndlp) { 4333 /* Cannot find existing Fabric ndlp, so allocate a new one */ 4334 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 4335 if (!ndlp) 4336 return NULL; 4337 lpfc_nlp_init(vport, ndlp, Fabric_DID); 4338 /* Set the node type */ 4339 ndlp->nlp_type |= NLP_FABRIC; 4340 /* Put ndlp onto node list */ 4341 lpfc_enqueue_node(vport, ndlp); 4342 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 4343 /* re-setup ndlp without removing from node list */ 4344 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 4345 if (!ndlp) 4346 return NULL; 4347 } 4348 if ((phba->pport->port_state < LPFC_FLOGI) && 4349 (phba->pport->port_state != LPFC_VPORT_FAILED)) 4350 return NULL; 4351 /* If virtual link is not yet instantiated ignore CVL */ 4352 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) 4353 && (vport->port_state != LPFC_VPORT_FAILED)) 4354 return NULL; 4355 shost = lpfc_shost_from_vport(vport); 4356 if (!shost) 4357 return NULL; 4358 lpfc_linkdown_port(vport); 4359 lpfc_cleanup_pending_mbox(vport); 4360 spin_lock_irq(shost->host_lock); 4361 vport->fc_flag |= FC_VPORT_CVL_RCVD; 4362 spin_unlock_irq(shost->host_lock); 4363 4364 return ndlp; 4365 } 4366 4367 /** 4368 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports 4369 * @phba: pointer to lpfc hba data structure. 4370 * 4371 * This routine is to perform Clear Virtual Link (CVL) on all vports in 4372 * response to an FCF dead event. 4373 **/ 4374 static void 4375 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) 4376 { 4377 struct lpfc_vport **vports; 4378 int i; 4379 4380 vports = lpfc_create_vport_work_array(phba); 4381 if (vports) 4382 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 4383 lpfc_sli4_perform_vport_cvl(vports[i]); 4384 lpfc_destroy_vport_work_array(phba, vports); 4385 } 4386 4387 /** 4388 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event 4389 * @phba: pointer to lpfc hba data structure. 4390 * @acqe_fip: pointer to the async fcoe completion queue entry. 4391 * 4392 * This routine is to handle the SLI4 asynchronous fcoe event.
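* FIP event types handled: new FCF announcement / FCF parameter modification, FCF table full, FCF dead, and Clear Virtual Link (CVL).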
4393 **/ 4394 static void 4395 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, 4396 struct lpfc_acqe_fip *acqe_fip) 4397 { 4398 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); 4399 int rc; 4400 struct lpfc_vport *vport; 4401 struct lpfc_nodelist *ndlp; 4402 struct Scsi_Host *shost; 4403 int active_vlink_present; 4404 struct lpfc_vport **vports; 4405 int i; 4406 4407 phba->fc_eventTag = acqe_fip->event_tag; 4408 phba->fcoe_eventtag = acqe_fip->event_tag; 4409 switch (event_type) { 4410 case LPFC_FIP_EVENT_TYPE_NEW_FCF: 4411 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: 4412 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) 4413 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 4414 LOG_DISCOVERY, 4415 "2546 New FCF event, evt_tag:x%x, " 4416 "index:x%x\n", 4417 acqe_fip->event_tag, 4418 acqe_fip->index); 4419 else 4420 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | 4421 LOG_DISCOVERY, 4422 "2788 FCF param modified event, " 4423 "evt_tag:x%x, index:x%x\n", 4424 acqe_fip->event_tag, 4425 acqe_fip->index); 4426 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 4427 /* 4428 * During period of FCF discovery, read the FCF 4429 * table record indexed by the event to update 4430 * FCF roundrobin failover eligible FCF bmask. 4431 */ 4432 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 4433 LOG_DISCOVERY, 4434 "2779 Read FCF (x%x) for updating " 4435 "roundrobin FCF failover bmask\n", 4436 acqe_fip->index); 4437 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); 4438 } 4439 4440 /* If the FCF discovery is in progress, do nothing. */ 4441 spin_lock_irq(&phba->hbalock); 4442 if (phba->hba_flag & FCF_TS_INPROG) { 4443 spin_unlock_irq(&phba->hbalock); 4444 break; 4445 } 4446 /* If fast FCF failover rescan event is pending, do nothing */ 4447 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { 4448 spin_unlock_irq(&phba->hbalock); 4449 break; 4450 } 4451 4452 /* If the FCF has been in discovered state, do nothing. */ 4453 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) { 4454 spin_unlock_irq(&phba->hbalock); 4455 break; 4456 } 4457 spin_unlock_irq(&phba->hbalock); 4458 4459 /* Otherwise, scan the entire FCF table and re-discover SAN */ 4460 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 4461 "2770 Start FCF table scan per async FCF " 4462 "event, evt_tag:x%x, index:x%x\n", 4463 acqe_fip->event_tag, acqe_fip->index); 4464 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 4465 LPFC_FCOE_FCF_GET_FIRST); 4466 if (rc) 4467 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 4468 "2547 Issue FCF scan read FCF mailbox " 4469 "command failed (x%x)\n", rc); 4470 break; 4471 4472 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: 4473 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4474 "2548 FCF Table full count 0x%x tag 0x%x\n", 4475 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), 4476 acqe_fip->event_tag); 4477 break; 4478 4479 case LPFC_FIP_EVENT_TYPE_FCF_DEAD: 4480 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 4481 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 4482 "2549 FCF (x%x) disconnected from network, " 4483 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag); 4484 /* 4485 * If we are in the middle of FCF failover process, clear 4486 * the corresponding FCF bit in the roundrobin bitmap. 
4487 */ 4488 spin_lock_irq(&phba->hbalock); 4489 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && 4490 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) { 4491 spin_unlock_irq(&phba->hbalock); 4492 /* Update FLOGI FCF failover eligible FCF bmask */ 4493 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); 4494 break; 4495 } 4496 spin_unlock_irq(&phba->hbalock); 4497 4498 /* If the event is not for the currently used FCF, do nothing */ 4499 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index) 4500 break; 4501 4502 /* 4503 * Otherwise, request the port to rediscover the entire FCF 4504 * table for a fast recovery in case the current FCF is no 4505 * longer valid, as we are not already in the middle of the 4506 * FCF failover process. 4507 */ 4508 spin_lock_irq(&phba->hbalock); 4509 /* Mark the fast failover process in progress */ 4510 phba->fcf.fcf_flag |= FCF_DEAD_DISC; 4511 spin_unlock_irq(&phba->hbalock); 4512 4513 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 4514 "2771 Start FCF fast failover process due to " 4515 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " 4516 "\n", acqe_fip->event_tag, acqe_fip->index); 4517 rc = lpfc_sli4_redisc_fcf_table(phba); 4518 if (rc) { 4519 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 4520 LOG_DISCOVERY, 4521 "2772 Issue FCF rediscover mailbox " 4522 "command failed, fail through to FCF " 4523 "dead event\n"); 4524 spin_lock_irq(&phba->hbalock); 4525 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 4526 spin_unlock_irq(&phba->hbalock); 4527 /* 4528 * Last resort will fail over by treating this 4529 * as a link down to FCF registration. 4530 */ 4531 lpfc_sli4_fcf_dead_failthrough(phba); 4532 } else { 4533 /* Reset FCF roundrobin bmask for new discovery */ 4534 lpfc_sli4_clear_fcf_rr_bmask(phba); 4535 /* 4536 * Handling fast FCF failover to a DEAD FCF event is 4537 * considered equivalent to receiving a CVL on all vports. 4538 */ 4539 lpfc_sli4_perform_all_vport_cvl(phba); 4540 } 4541 break; 4542 case LPFC_FIP_EVENT_TYPE_CVL: 4543 phba->fcoe_cvl_eventtag = acqe_fip->event_tag; 4544 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 4545 "2718 Clear Virtual Link Received for VPI 0x%x" 4546 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 4547 4548 vport = lpfc_find_vport_by_vpid(phba, 4549 acqe_fip->index); 4550 ndlp = lpfc_sli4_perform_vport_cvl(vport); 4551 if (!ndlp) 4552 break; 4553 active_vlink_present = 0; 4554 4555 vports = lpfc_create_vport_work_array(phba); 4556 if (vports) { 4557 for (i = 0; i <= phba->max_vports && vports[i] != NULL; 4558 i++) { 4559 if ((!(vports[i]->fc_flag & 4560 FC_VPORT_CVL_RCVD)) && 4561 (vports[i]->port_state > LPFC_FDISC)) { 4562 active_vlink_present = 1; 4563 break; 4564 } 4565 } 4566 lpfc_destroy_vport_work_array(phba, vports); 4567 } 4568 4569 /* 4570 * Don't re-instantiate if vport is marked for deletion. 4571 * If we are here first then vport_delete is going to wait 4572 * for discovery to complete. 4573 */ 4574 if (!(vport->load_flag & FC_UNLOADING) && 4575 active_vlink_present) { 4576 /* 4577 * If there are other active VLinks present, 4578 * re-instantiate the Vlink using FDISC.
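* The FDISC itself is deferred: the code that follows arms the ndlp delay timer for one second, sets NLP_DELAY_TMO, and records ELS_CMD_FDISC as the deferred ELS command so discovery resumes when the timer fires.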
4579 */ 4580 mod_timer(&ndlp->nlp_delayfunc, 4581 jiffies + msecs_to_jiffies(1000)); 4582 shost = lpfc_shost_from_vport(vport); 4583 spin_lock_irq(shost->host_lock); 4584 ndlp->nlp_flag |= NLP_DELAY_TMO; 4585 spin_unlock_irq(shost->host_lock); 4586 ndlp->nlp_last_elscmd = ELS_CMD_FDISC; 4587 vport->port_state = LPFC_FDISC; 4588 } else { 4589 /* 4590 * Otherwise, we request the port to rediscover 4591 * the entire FCF table for a fast recovery in 4592 * case the current FCF is no longer valid, if 4593 * we are not already in the FCF failover 4594 * process. 4595 */ 4596 spin_lock_irq(&phba->hbalock); 4597 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 4598 spin_unlock_irq(&phba->hbalock); 4599 break; 4600 } 4601 /* Mark the fast failover process in progress */ 4602 phba->fcf.fcf_flag |= FCF_ACVL_DISC; 4603 spin_unlock_irq(&phba->hbalock); 4604 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | 4605 LOG_DISCOVERY, 4606 "2773 Start FCF failover per CVL, " 4607 "evt_tag:x%x\n", acqe_fip->event_tag); 4608 rc = lpfc_sli4_redisc_fcf_table(phba); 4609 if (rc) { 4610 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | 4611 LOG_DISCOVERY, 4612 "2774 Issue FCF rediscover " 4613 "mailbox command failed, fail " 4614 "through to CVL event\n"); 4615 spin_lock_irq(&phba->hbalock); 4616 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 4617 spin_unlock_irq(&phba->hbalock); 4618 /* 4619 * Last resort will be to retry on the 4620 * current registered FCF entry. 4621 */ 4622 lpfc_retry_pport_discovery(phba); 4623 } else 4624 /* 4625 * Reset FCF roundrobin bmask for new 4626 * discovery. 4627 */ 4628 lpfc_sli4_clear_fcf_rr_bmask(phba); 4629 } 4630 break; 4631 default: 4632 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4633 "0288 Unknown FCoE event type 0x%x event tag " 4634 "0x%x\n", event_type, acqe_fip->event_tag); 4635 break; 4636 } 4637 } 4638 4639 /** 4640 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event 4641 * @phba: pointer to lpfc hba data structure. 4642 * @acqe_dcbx: pointer to the async dcbx completion queue entry. 4643 * 4644 * This routine is to handle the SLI4 asynchronous dcbx event. 4645 **/ 4646 static void 4647 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, 4648 struct lpfc_acqe_dcbx *acqe_dcbx) 4649 { 4650 phba->fc_eventTag = acqe_dcbx->event_tag; 4651 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4652 "0290 The SLI4 DCBX asynchronous event is not " 4653 "handled yet\n"); 4654 } 4655 4656 /** 4657 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event 4658 * @phba: pointer to lpfc hba data structure. 4659 * @acqe_grp5: pointer to the async grp5 completion queue entry. 4660 * 4661 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event 4662 * is an asynchronous notification of a logical link speed change. The Port 4663 * reports the logical link speed in units of 10Mbps.
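* The handler below converts that value to Mbps (multiplying by 10) before storing it in link_state.logical_speed.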
4664 **/ 4665 static void 4666 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, 4667 struct lpfc_acqe_grp5 *acqe_grp5) 4668 { 4669 uint16_t prev_ll_spd; 4670 4671 phba->fc_eventTag = acqe_grp5->event_tag; 4672 phba->fcoe_eventtag = acqe_grp5->event_tag; 4673 prev_ll_spd = phba->sli4_hba.link_state.logical_speed; 4674 phba->sli4_hba.link_state.logical_speed = 4675 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10; 4676 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4677 "2789 GRP5 Async Event: Updating logical link speed " 4678 "from %dMbps to %dMbps\n", prev_ll_spd, 4679 phba->sli4_hba.link_state.logical_speed); 4680 } 4681 4682 /** 4683 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event 4684 * @phba: pointer to lpfc hba data structure. 4685 * 4686 * This routine is invoked by the worker thread to process all the pending 4687 * SLI4 asynchronous events. 4688 **/ 4689 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) 4690 { 4691 struct lpfc_cq_event *cq_event; 4692 4693 /* First, declare the async event has been handled */ 4694 spin_lock_irq(&phba->hbalock); 4695 phba->hba_flag &= ~ASYNC_EVENT; 4696 spin_unlock_irq(&phba->hbalock); 4697 /* Now, handle all the async events */ 4698 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { 4699 /* Get the first event from the head of the event queue */ 4700 spin_lock_irq(&phba->hbalock); 4701 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, 4702 cq_event, struct lpfc_cq_event, list); 4703 spin_unlock_irq(&phba->hbalock); 4704 /* Process the asynchronous event */ 4705 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { 4706 case LPFC_TRAILER_CODE_LINK: 4707 lpfc_sli4_async_link_evt(phba, 4708 &cq_event->cqe.acqe_link); 4709 break; 4710 case LPFC_TRAILER_CODE_FCOE: 4711 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip); 4712 break; 4713 case LPFC_TRAILER_CODE_DCBX: 4714 lpfc_sli4_async_dcbx_evt(phba, 4715 &cq_event->cqe.acqe_dcbx); 4716 break; 4717 case LPFC_TRAILER_CODE_GRP5: 4718 lpfc_sli4_async_grp5_evt(phba, 4719 &cq_event->cqe.acqe_grp5); 4720 break; 4721 case LPFC_TRAILER_CODE_FC: 4722 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc); 4723 break; 4724 case LPFC_TRAILER_CODE_SLI: 4725 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); 4726 break; 4727 default: 4728 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4729 "1804 Invalid asynchrous event code: " 4730 "x%x\n", bf_get(lpfc_trailer_code, 4731 &cq_event->cqe.mcqe_cmpl)); 4732 break; 4733 } 4734 /* Free the completion event processed to the free pool */ 4735 lpfc_sli4_cq_event_release(phba, cq_event); 4736 } 4737 } 4738 4739 /** 4740 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event 4741 * @phba: pointer to lpfc hba data structure. 4742 * 4743 * This routine is invoked by the worker thread to process FCF table 4744 * rediscovery pending completion event. 
4745 **/ 4746 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) 4747 { 4748 int rc; 4749 4750 spin_lock_irq(&phba->hbalock); 4751 /* Clear FCF rediscovery timeout event */ 4752 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT; 4753 /* Clear driver fast failover FCF record flag */ 4754 phba->fcf.failover_rec.flag = 0; 4755 /* Set state for FCF fast failover */ 4756 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 4757 spin_unlock_irq(&phba->hbalock); 4758 4759 /* Scan FCF table from the first entry to re-discover SAN */ 4760 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 4761 "2777 Start post-quiescent FCF table scan\n"); 4762 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 4763 if (rc) 4764 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 4765 "2747 Issue FCF scan read FCF mailbox " 4766 "command failed 0x%x\n", rc); 4767 } 4768 4769 /** 4770 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table 4771 * @phba: pointer to lpfc hba data structure. 4772 * @dev_grp: The HBA PCI-Device group number. 4773 * 4774 * This routine is invoked to set up the per HBA PCI-Device group function 4775 * API jump table entries. 4776 * 4777 * Return: 0 if success, otherwise -ENODEV 4778 **/ 4779 int 4780 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 4781 { 4782 int rc; 4783 4784 /* Set up lpfc PCI-device group */ 4785 phba->pci_dev_grp = dev_grp; 4786 4787 /* The LPFC_PCI_DEV_OC uses SLI4 */ 4788 if (dev_grp == LPFC_PCI_DEV_OC) 4789 phba->sli_rev = LPFC_SLI_REV4; 4790 4791 /* Set up device INIT API function jump table */ 4792 rc = lpfc_init_api_table_setup(phba, dev_grp); 4793 if (rc) 4794 return -ENODEV; 4795 /* Set up SCSI API function jump table */ 4796 rc = lpfc_scsi_api_table_setup(phba, dev_grp); 4797 if (rc) 4798 return -ENODEV; 4799 /* Set up SLI API function jump table */ 4800 rc = lpfc_sli_api_table_setup(phba, dev_grp); 4801 if (rc) 4802 return -ENODEV; 4803 /* Set up MBOX API function jump table */ 4804 rc = lpfc_mbox_api_table_setup(phba, dev_grp); 4805 if (rc) 4806 return -ENODEV; 4807 4808 return 0; 4809 } 4810 4811 /** 4812 * lpfc_log_intr_mode - Log the active interrupt mode 4813 * @phba: pointer to lpfc hba data structure. 4814 * @intr_mode: active interrupt mode adopted. 4815 * 4816 * This routine it invoked to log the currently used active interrupt mode 4817 * to the device. 4818 **/ 4819 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) 4820 { 4821 switch (intr_mode) { 4822 case 0: 4823 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4824 "0470 Enable INTx interrupt mode.\n"); 4825 break; 4826 case 1: 4827 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4828 "0481 Enabled MSI interrupt mode.\n"); 4829 break; 4830 case 2: 4831 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4832 "0480 Enabled MSI-X interrupt mode.\n"); 4833 break; 4834 default: 4835 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4836 "0482 Illegal interrupt mode.\n"); 4837 break; 4838 } 4839 return; 4840 } 4841 4842 /** 4843 * lpfc_enable_pci_dev - Enable a generic PCI device. 4844 * @phba: pointer to lpfc hba data structure. 4845 * 4846 * This routine is invoked to enable the PCI device that is common to all 4847 * PCI devices. 
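* It enables the device's memory-mapped resources, requests the PCI memory regions, enables bus mastering (and MWI where possible), and saves the PCI state so EEH/error recovery can later restore it.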
4848 * 4849 * Return codes 4850 * 0 - successful 4851 * other values - error 4852 **/ 4853 static int 4854 lpfc_enable_pci_dev(struct lpfc_hba *phba) 4855 { 4856 struct pci_dev *pdev; 4857 4858 /* Obtain PCI device reference */ 4859 if (!phba->pcidev) 4860 goto out_error; 4861 else 4862 pdev = phba->pcidev; 4863 /* Enable PCI device */ 4864 if (pci_enable_device_mem(pdev)) 4865 goto out_error; 4866 /* Request PCI resource for the device */ 4867 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME)) 4868 goto out_disable_device; 4869 /* Set up device as PCI master and save state for EEH */ 4870 pci_set_master(pdev); 4871 pci_try_set_mwi(pdev); 4872 pci_save_state(pdev); 4873 4874 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ 4875 if (pci_is_pcie(pdev)) 4876 pdev->needs_freset = 1; 4877 4878 return 0; 4879 4880 out_disable_device: 4881 pci_disable_device(pdev); 4882 out_error: 4883 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4884 "1401 Failed to enable pci device\n"); 4885 return -ENODEV; 4886 } 4887 4888 /** 4889 * lpfc_disable_pci_dev - Disable a generic PCI device. 4890 * @phba: pointer to lpfc hba data structure. 4891 * 4892 * This routine is invoked to disable the PCI device that is common to all 4893 * PCI devices. 4894 **/ 4895 static void 4896 lpfc_disable_pci_dev(struct lpfc_hba *phba) 4897 { 4898 struct pci_dev *pdev; 4899 4900 /* Obtain PCI device reference */ 4901 if (!phba->pcidev) 4902 return; 4903 else 4904 pdev = phba->pcidev; 4905 /* Release PCI resource and disable PCI device */ 4906 pci_release_mem_regions(pdev); 4907 pci_disable_device(pdev); 4908 4909 return; 4910 } 4911 4912 /** 4913 * lpfc_reset_hba - Reset a hba 4914 * @phba: pointer to lpfc hba data structure. 4915 * 4916 * This routine is invoked to reset a hba device. It brings the HBA 4917 * offline, performs a board restart, and then brings the board back 4918 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up 4919 * outstanding mailbox commands. 4920 **/ 4921 void 4922 lpfc_reset_hba(struct lpfc_hba *phba) 4923 { 4924 /* If resets are disabled then set error state and return. */ 4925 if (!phba->cfg_enable_hba_reset) { 4926 phba->link_state = LPFC_HBA_ERROR; 4927 return; 4928 } 4929 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 4930 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 4931 else 4932 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); 4933 lpfc_offline(phba); 4934 lpfc_sli_brdrestart(phba); 4935 lpfc_online(phba); 4936 lpfc_unblock_mgmt_io(phba); 4937 } 4938 4939 /** 4940 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions 4941 * @phba: pointer to lpfc hba data structure. 4942 * 4943 * This function reads the SR-IOV extended capability of the physical 4944 * function to determine the total number of virtual functions the device 4945 * supports. It is used to validate a request to enable virtual functions 4946 * before pci_enable_sriov() is invoked. 4947 * Return: number of supported virtual functions, or 0 if the device has no SR-IOV capability.
4948 **/ 4949 uint16_t 4950 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba) 4951 { 4952 struct pci_dev *pdev = phba->pcidev; 4953 uint16_t nr_virtfn; 4954 int pos; 4955 4956 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 4957 if (pos == 0) 4958 return 0; 4959 4960 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn); 4961 return nr_virtfn; 4962 } 4963 4964 /** 4965 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions 4966 * @phba: pointer to lpfc hba data structure. 4967 * @nr_vfn: number of virtual functions to be enabled. 4968 * 4969 * This function enables the PCI SR-IOV virtual functions to a physical 4970 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to 4971 * enable the number of virtual functions to the physical function. As 4972 * not all devices support SR-IOV, the return code from the pci_enable_sriov() 4973 * API call does not considered as an error condition for most of the device. 4974 **/ 4975 int 4976 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn) 4977 { 4978 struct pci_dev *pdev = phba->pcidev; 4979 uint16_t max_nr_vfn; 4980 int rc; 4981 4982 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba); 4983 if (nr_vfn > max_nr_vfn) { 4984 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4985 "3057 Requested vfs (%d) greater than " 4986 "supported vfs (%d)", nr_vfn, max_nr_vfn); 4987 return -EINVAL; 4988 } 4989 4990 rc = pci_enable_sriov(pdev, nr_vfn); 4991 if (rc) { 4992 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4993 "2806 Failed to enable sriov on this device " 4994 "with vfn number nr_vf:%d, rc:%d\n", 4995 nr_vfn, rc); 4996 } else 4997 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4998 "2807 Successful enable sriov on this device " 4999 "with vfn number nr_vf:%d\n", nr_vfn); 5000 return rc; 5001 } 5002 5003 /** 5004 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev. 5005 * @phba: pointer to lpfc hba data structure. 5006 * 5007 * This routine is invoked to set up the driver internal resources specific to 5008 * support the SLI-3 HBA device it attached to. 
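* This covers the driver timers, the host-attention work mask, the SLI-3 ring array, scatter-gather sizing for the DMA buffer pool, the driver memory pools, and (when configured) SR-IOV virtual functions.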
5009 * 5010 * Return codes 5011 * 0 - successful 5012 * other values - error 5013 **/ 5014 static int 5015 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) 5016 { 5017 struct lpfc_sli *psli; 5018 int rc; 5019 5020 /* 5021 * Initialize timers used by driver 5022 */ 5023 5024 /* Heartbeat timer */ 5025 init_timer(&phba->hb_tmofunc); 5026 phba->hb_tmofunc.function = lpfc_hb_timeout; 5027 phba->hb_tmofunc.data = (unsigned long)phba; 5028 5029 psli = &phba->sli; 5030 /* MBOX heartbeat timer */ 5031 init_timer(&psli->mbox_tmo); 5032 psli->mbox_tmo.function = lpfc_mbox_timeout; 5033 psli->mbox_tmo.data = (unsigned long) phba; 5034 /* FCP polling mode timer */ 5035 init_timer(&phba->fcp_poll_timer); 5036 phba->fcp_poll_timer.function = lpfc_poll_timeout; 5037 phba->fcp_poll_timer.data = (unsigned long) phba; 5038 /* Fabric block timer */ 5039 init_timer(&phba->fabric_block_timer); 5040 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 5041 phba->fabric_block_timer.data = (unsigned long) phba; 5042 /* EA polling mode timer */ 5043 init_timer(&phba->eratt_poll); 5044 phba->eratt_poll.function = lpfc_poll_eratt; 5045 phba->eratt_poll.data = (unsigned long) phba; 5046 5047 /* Host attention work mask setup */ 5048 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); 5049 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); 5050 5051 /* Get all the module params for configuring this host */ 5052 lpfc_get_cfgparam(phba); 5053 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { 5054 phba->menlo_flag |= HBA_MENLO_SUPPORT; 5055 /* check for menlo minimum sg count */ 5056 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) 5057 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; 5058 } 5059 5060 if (!phba->sli.ring) 5061 phba->sli.ring = kzalloc(LPFC_SLI3_MAX_RING * 5062 sizeof(struct lpfc_sli_ring), GFP_KERNEL); 5063 if (!phba->sli.ring) 5064 return -ENOMEM; 5065 5066 /* 5067 * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size 5068 * used to create the sg_dma_buf_pool must be dynamically calculated. 5069 */ 5070 5071 /* Initialize the host templates with the configured values. */ 5072 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 5073 lpfc_template_s3.sg_tablesize = phba->cfg_sg_seg_cnt; 5074 5075 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ 5076 if (phba->cfg_enable_bg) { 5077 /* 5078 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, 5079 * the FCP rsp, and a BDE for each. Since we have no control 5080 * over how many protection data segments the SCSI Layer 5081 * will hand us (i.e. there could be one for every block 5082 * in the IO), we just allocate enough BDEs to accommodate 5083 * our max amount and we need to limit lpfc_sg_seg_cnt to 5084 * minimize the risk of running out. 5085 */ 5086 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 5087 sizeof(struct fcp_rsp) + 5088 (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64)); 5089 5090 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) 5091 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; 5092 5093 /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */ 5094 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT; 5095 } else { 5096 /* 5097 * The scsi_buf for a regular I/O will hold the FCP cmnd, 5098 * the FCP rsp, a BDE for each, and a BDE for up to 5099 * cfg_sg_seg_cnt data segments.
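* For example, with cfg_sg_seg_cnt set to 64 the buffer must cover 66 BDEs in total (64 data BDEs plus one each for the FCP cmnd and FCP rsp), which is what the (cfg_sg_seg_cnt + 2) term below accounts for.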
5100 */ 5101 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 5102 sizeof(struct fcp_rsp) + 5103 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); 5104 5105 /* Total BDEs in BPL for scsi_sg_list */ 5106 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; 5107 } 5108 5109 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 5110 "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", 5111 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 5112 phba->cfg_total_seg_cnt); 5113 5114 phba->max_vpi = LPFC_MAX_VPI; 5115 /* This will be set to correct value after config_port mbox */ 5116 phba->max_vports = 0; 5117 5118 /* 5119 * Initialize the SLI Layer to run with lpfc HBAs. 5120 */ 5121 lpfc_sli_setup(phba); 5122 lpfc_sli_queue_setup(phba); 5123 5124 /* Allocate device driver memory */ 5125 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) 5126 return -ENOMEM; 5127 5128 /* 5129 * Enable sr-iov virtual functions if supported and configured 5130 * through the module parameter. 5131 */ 5132 if (phba->cfg_sriov_nr_virtfn > 0) { 5133 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 5134 phba->cfg_sriov_nr_virtfn); 5135 if (rc) { 5136 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5137 "2808 Requested number of SR-IOV " 5138 "virtual functions (%d) is not " 5139 "supported\n", 5140 phba->cfg_sriov_nr_virtfn); 5141 phba->cfg_sriov_nr_virtfn = 0; 5142 } 5143 } 5144 5145 return 0; 5146 } 5147 5148 /** 5149 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev 5150 * @phba: pointer to lpfc hba data structure. 5151 * 5152 * This routine is invoked to unset the driver internal resources set up 5153 * specific for supporting the SLI-3 HBA device it attached to. 5154 **/ 5155 static void 5156 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) 5157 { 5158 /* Free device driver memory allocated */ 5159 lpfc_mem_free_all(phba); 5160 5161 return; 5162 } 5163 5164 /** 5165 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev 5166 * @phba: pointer to lpfc hba data structure. 5167 * 5168 * This routine is invoked to set up the driver internal resources specific to 5169 * support the SLI-4 HBA device it attached to. 
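* In addition to the SLI-3 style timers and scatter-gather sizing, this includes the FCF rediscover wait timer, the SLI4 slow-path work queue lists and CQ event pool, the bootstrap mailbox, endian order setup, READ_CONFIG, sgl and RPI header initialization, and the per-CPU interrupt vector mapping arrays.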
5170 * 5171 * Return codes 5172 * 0 - successful 5173 * other values - error 5174 **/ 5175 static int 5176 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) 5177 { 5178 struct lpfc_vector_map_info *cpup; 5179 struct lpfc_sli *psli; 5180 LPFC_MBOXQ_t *mboxq; 5181 int rc, i, hbq_count, max_buf_size; 5182 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 5183 struct lpfc_mqe *mqe; 5184 int longs; 5185 int fof_vectors = 0; 5186 5187 /* Get all the module params for configuring this host */ 5188 lpfc_get_cfgparam(phba); 5189 5190 /* Before proceed, wait for POST done and device ready */ 5191 rc = lpfc_sli4_post_status_check(phba); 5192 if (rc) 5193 return -ENODEV; 5194 5195 /* 5196 * Initialize timers used by driver 5197 */ 5198 5199 /* Heartbeat timer */ 5200 init_timer(&phba->hb_tmofunc); 5201 phba->hb_tmofunc.function = lpfc_hb_timeout; 5202 phba->hb_tmofunc.data = (unsigned long)phba; 5203 init_timer(&phba->rrq_tmr); 5204 phba->rrq_tmr.function = lpfc_rrq_timeout; 5205 phba->rrq_tmr.data = (unsigned long)phba; 5206 5207 psli = &phba->sli; 5208 /* MBOX heartbeat timer */ 5209 init_timer(&psli->mbox_tmo); 5210 psli->mbox_tmo.function = lpfc_mbox_timeout; 5211 psli->mbox_tmo.data = (unsigned long) phba; 5212 /* Fabric block timer */ 5213 init_timer(&phba->fabric_block_timer); 5214 phba->fabric_block_timer.function = lpfc_fabric_block_timeout; 5215 phba->fabric_block_timer.data = (unsigned long) phba; 5216 /* EA polling mode timer */ 5217 init_timer(&phba->eratt_poll); 5218 phba->eratt_poll.function = lpfc_poll_eratt; 5219 phba->eratt_poll.data = (unsigned long) phba; 5220 /* FCF rediscover timer */ 5221 init_timer(&phba->fcf.redisc_wait); 5222 phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo; 5223 phba->fcf.redisc_wait.data = (unsigned long)phba; 5224 5225 /* 5226 * Control structure for handling external multi-buffer mailbox 5227 * command pass-through. 5228 */ 5229 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0, 5230 sizeof(struct lpfc_mbox_ext_buf_ctx)); 5231 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); 5232 5233 phba->max_vpi = LPFC_MAX_VPI; 5234 5235 /* This will be set to correct value after the read_config mbox */ 5236 phba->max_vports = 0; 5237 5238 /* Program the default value of vlan_id and fc_map */ 5239 phba->valid_vlan = 0; 5240 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 5241 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 5242 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 5243 5244 /* 5245 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands 5246 * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple. 5247 */ 5248 if (!phba->sli.ring) 5249 phba->sli.ring = kzalloc( 5250 (LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) * 5251 sizeof(struct lpfc_sli_ring), GFP_KERNEL); 5252 if (!phba->sli.ring) 5253 return -ENOMEM; 5254 5255 /* 5256 * It doesn't matter what family our adapter is in, we are 5257 * limited to 2 Pages, 512 SGEs, for our SGL. 5258 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp 5259 */ 5260 max_buf_size = (2 * SLI4_PAGE_SIZE); 5261 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2) 5262 phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2; 5263 5264 /* 5265 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size 5266 * used to create the sg_dma_buf_pool must be dynamically calculated. 5267 */ 5268 5269 if (phba->cfg_enable_bg) { 5270 /* 5271 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, 5272 * the FCP rsp, and a SGE for each. 
Since we have no control 5273 * over how many protection data segments the SCSI Layer 5274 * will hand us (i.e. there could be one for every block 5275 * in the IO), we just allocate enough SGEs to accommodate 5276 * our max amount and we need to limit lpfc_sg_seg_cnt to 5277 * minimize the risk of running out. 5278 */ 5279 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 5280 sizeof(struct fcp_rsp) + max_buf_size; 5281 5282 /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ 5283 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; 5284 5285 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF) 5286 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF; 5287 } else { 5288 /* 5289 * The scsi_buf for a regular I/O will hold the FCP cmnd, 5290 * the FCP rsp, an SGE for each, and an SGE for up to 5291 * cfg_sg_seg_cnt data segments. 5292 */ 5293 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + 5294 sizeof(struct fcp_rsp) + 5295 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)); 5296 5297 /* Total SGEs for scsi_sg_list */ 5298 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; 5299 /* 5300 * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need 5301 * to post 1 page for the SGL. 5302 */ 5303 } 5304 5305 /* Initialize the host templates with the updated values. */ 5306 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; 5307 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; 5308 5309 if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) 5310 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; 5311 else 5312 phba->cfg_sg_dma_buf_size = 5313 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); 5314 5315 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, 5316 "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n", 5317 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, 5318 phba->cfg_total_seg_cnt); 5319 5320 /* Initialize buffer queue management fields */ 5321 hbq_count = lpfc_sli_hbq_count(); 5322 for (i = 0; i < hbq_count; ++i) 5323 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 5324 INIT_LIST_HEAD(&phba->rb_pend_list); 5325 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; 5326 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; 5327 5328 /* 5329 * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 5330 */ 5331 /* Initialize the Abort scsi buffer list used by driver */ 5332 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock); 5333 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list); 5334 /* This abort list is used by the worker thread */ 5335 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock); 5336 5337 /* 5338 * Initialize driver internal slow-path work queues 5339 */ 5340 5341 /* Driver internal slow-path CQ Event pool */ 5342 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); 5343 /* Response IOCB work queue list */ 5344 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event); 5345 /* Asynchronous event CQ Event work queue list */ 5346 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); 5347 /* Fast-path XRI aborted CQ Event work queue list */ 5348 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 5349 /* Slow-path XRI aborted CQ Event work queue list */ 5350 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); 5351 /* Receive queue CQ Event work queue list */ 5352 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); 5353 5354 /* Initialize extent block lists.
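* These lists hold the RPI, XRI, VFI and VPI resource extent blocks allocated from the port when it reports that resource extents are in use.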
*/ 5355 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list); 5356 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list); 5357 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list); 5358 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list); 5359 5360 /* initialize optic_state to 0xFF */ 5361 phba->sli4_hba.lnk_info.optic_state = 0xff; 5362 5363 /* Initialize the driver internal SLI layer lists. */ 5364 lpfc_sli_setup(phba); 5365 lpfc_sli_queue_setup(phba); 5366 5367 /* Allocate device driver memory */ 5368 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); 5369 if (rc) 5370 return -ENOMEM; 5371 5372 /* IF Type 2 ports get initialized now. */ 5373 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 5374 LPFC_SLI_INTF_IF_TYPE_2) { 5375 rc = lpfc_pci_function_reset(phba); 5376 if (unlikely(rc)) 5377 return -ENODEV; 5378 phba->temp_sensor_support = 1; 5379 } 5380 5381 /* Create the bootstrap mailbox command */ 5382 rc = lpfc_create_bootstrap_mbox(phba); 5383 if (unlikely(rc)) 5384 goto out_free_mem; 5385 5386 /* Set up the host's endian order with the device. */ 5387 rc = lpfc_setup_endian_order(phba); 5388 if (unlikely(rc)) 5389 goto out_free_bsmbx; 5390 5391 /* Set up the hba's configuration parameters. */ 5392 rc = lpfc_sli4_read_config(phba); 5393 if (unlikely(rc)) 5394 goto out_free_bsmbx; 5395 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba); 5396 if (unlikely(rc)) 5397 goto out_free_bsmbx; 5398 5399 /* IF Type 0 ports get initialized now. */ 5400 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 5401 LPFC_SLI_INTF_IF_TYPE_0) { 5402 rc = lpfc_pci_function_reset(phba); 5403 if (unlikely(rc)) 5404 goto out_free_bsmbx; 5405 } 5406 5407 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 5408 GFP_KERNEL); 5409 if (!mboxq) { 5410 rc = -ENOMEM; 5411 goto out_free_bsmbx; 5412 } 5413 5414 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */ 5415 lpfc_supported_pages(mboxq); 5416 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5417 if (!rc) { 5418 mqe = &mboxq->u.mqe; 5419 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), 5420 LPFC_MAX_SUPPORTED_PAGES); 5421 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { 5422 switch (pn_page[i]) { 5423 case LPFC_SLI4_PARAMETERS: 5424 phba->sli4_hba.pc_sli4_params.supported = 1; 5425 break; 5426 default: 5427 break; 5428 } 5429 } 5430 /* Read the port's SLI4 Parameters capabilities if supported. */ 5431 if (phba->sli4_hba.pc_sli4_params.supported) 5432 rc = lpfc_pc_sli4_params_get(phba, mboxq); 5433 if (rc) { 5434 mempool_free(mboxq, phba->mbox_mem_pool); 5435 rc = -EIO; 5436 goto out_free_bsmbx; 5437 } 5438 } 5439 5440 /* 5441 * Get sli4 parameters that override parameters from Port capabilities. 5442 * If this call fails, it isn't critical unless the SLI4 parameters come 5443 * back in conflict. 
5444 */ 5445 rc = lpfc_get_sli4_parameters(phba, mboxq); 5446 if (rc) { 5447 if (phba->sli4_hba.extents_in_use && 5448 phba->sli4_hba.rpi_hdrs_in_use) { 5449 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5450 "2999 Unsupported SLI4 Parameters " 5451 "Extents and RPI headers enabled.\n"); 5452 goto out_free_bsmbx; 5453 } 5454 } 5455 mempool_free(mboxq, phba->mbox_mem_pool); 5456 5457 /* Verify OAS is supported */ 5458 lpfc_sli4_oas_verify(phba); 5459 if (phba->cfg_fof) 5460 fof_vectors = 1; 5461 5462 /* Verify all the SLI4 queues */ 5463 rc = lpfc_sli4_queue_verify(phba); 5464 if (rc) 5465 goto out_free_bsmbx; 5466 5467 /* Create driver internal CQE event pool */ 5468 rc = lpfc_sli4_cq_event_pool_create(phba); 5469 if (rc) 5470 goto out_free_bsmbx; 5471 5472 /* Initialize sgl lists per host */ 5473 lpfc_init_sgl_list(phba); 5474 5475 /* Allocate and initialize active sgl array */ 5476 rc = lpfc_init_active_sgl_array(phba); 5477 if (rc) { 5478 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5479 "1430 Failed to initialize sgl list.\n"); 5480 goto out_destroy_cq_event_pool; 5481 } 5482 rc = lpfc_sli4_init_rpi_hdrs(phba); 5483 if (rc) { 5484 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5485 "1432 Failed to initialize rpi headers.\n"); 5486 goto out_free_active_sgl; 5487 } 5488 5489 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ 5490 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; 5491 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long), 5492 GFP_KERNEL); 5493 if (!phba->fcf.fcf_rr_bmask) { 5494 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5495 "2759 Failed allocate memory for FCF round " 5496 "robin failover bmask\n"); 5497 rc = -ENOMEM; 5498 goto out_remove_rpi_hdrs; 5499 } 5500 5501 phba->sli4_hba.fcp_eq_hdl = 5502 kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 5503 (fof_vectors + phba->cfg_fcp_io_channel)), 5504 GFP_KERNEL); 5505 if (!phba->sli4_hba.fcp_eq_hdl) { 5506 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5507 "2572 Failed allocate memory for " 5508 "fast-path per-EQ handle array\n"); 5509 rc = -ENOMEM; 5510 goto out_free_fcf_rr_bmask; 5511 } 5512 5513 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * 5514 (fof_vectors + 5515 phba->cfg_fcp_io_channel)), GFP_KERNEL); 5516 if (!phba->sli4_hba.msix_entries) { 5517 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5518 "2573 Failed allocate memory for msi-x " 5519 "interrupt vector entries\n"); 5520 rc = -ENOMEM; 5521 goto out_free_fcp_eq_hdl; 5522 } 5523 5524 phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) * 5525 phba->sli4_hba.num_present_cpu), 5526 GFP_KERNEL); 5527 if (!phba->sli4_hba.cpu_map) { 5528 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5529 "3327 Failed allocate memory for msi-x " 5530 "interrupt vector mapping\n"); 5531 rc = -ENOMEM; 5532 goto out_free_msix; 5533 } 5534 if (lpfc_used_cpu == NULL) { 5535 lpfc_used_cpu = kzalloc((sizeof(uint16_t) * lpfc_present_cpu), 5536 GFP_KERNEL); 5537 if (!lpfc_used_cpu) { 5538 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5539 "3335 Failed allocate memory for msi-x " 5540 "interrupt vector mapping\n"); 5541 kfree(phba->sli4_hba.cpu_map); 5542 rc = -ENOMEM; 5543 goto out_free_msix; 5544 } 5545 for (i = 0; i < lpfc_present_cpu; i++) 5546 lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY; 5547 } 5548 5549 /* Initialize io channels for round robin */ 5550 cpup = phba->sli4_hba.cpu_map; 5551 rc = 0; 5552 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 5553 cpup->channel_id = rc; 5554 rc++; 5555 if (rc >= phba->cfg_fcp_io_channel) 5556 rc 
= 0; 5557 } 5558 5559 /* 5560 * Enable sr-iov virtual functions if supported and configured 5561 * through the module parameter. 5562 */ 5563 if (phba->cfg_sriov_nr_virtfn > 0) { 5564 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, 5565 phba->cfg_sriov_nr_virtfn); 5566 if (rc) { 5567 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5568 "3020 Requested number of SR-IOV " 5569 "virtual functions (%d) is not " 5570 "supported\n", 5571 phba->cfg_sriov_nr_virtfn); 5572 phba->cfg_sriov_nr_virtfn = 0; 5573 } 5574 } 5575 5576 return 0; 5577 5578 out_free_msix: 5579 kfree(phba->sli4_hba.msix_entries); 5580 out_free_fcp_eq_hdl: 5581 kfree(phba->sli4_hba.fcp_eq_hdl); 5582 out_free_fcf_rr_bmask: 5583 kfree(phba->fcf.fcf_rr_bmask); 5584 out_remove_rpi_hdrs: 5585 lpfc_sli4_remove_rpi_hdrs(phba); 5586 out_free_active_sgl: 5587 lpfc_free_active_sgl(phba); 5588 out_destroy_cq_event_pool: 5589 lpfc_sli4_cq_event_pool_destroy(phba); 5590 out_free_bsmbx: 5591 lpfc_destroy_bootstrap_mbox(phba); 5592 out_free_mem: 5593 lpfc_mem_free(phba); 5594 return rc; 5595 } 5596 5597 /** 5598 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev 5599 * @phba: pointer to lpfc hba data structure. 5600 * 5601 * This routine is invoked to unset the driver internal resources set up 5602 * specific for supporting the SLI-4 HBA device it attached to. 5603 **/ 5604 static void 5605 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) 5606 { 5607 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 5608 5609 /* Free memory allocated for msi-x interrupt vector to CPU mapping */ 5610 kfree(phba->sli4_hba.cpu_map); 5611 phba->sli4_hba.num_present_cpu = 0; 5612 phba->sli4_hba.num_online_cpu = 0; 5613 phba->sli4_hba.curr_disp_cpu = 0; 5614 5615 /* Free memory allocated for msi-x interrupt vector entries */ 5616 kfree(phba->sli4_hba.msix_entries); 5617 5618 /* Free memory allocated for fast-path work queue handles */ 5619 kfree(phba->sli4_hba.fcp_eq_hdl); 5620 5621 /* Free the allocated rpi headers. */ 5622 lpfc_sli4_remove_rpi_hdrs(phba); 5623 lpfc_sli4_remove_rpis(phba); 5624 5625 /* Free eligible FCF index bmask */ 5626 kfree(phba->fcf.fcf_rr_bmask); 5627 5628 /* Free the ELS sgl list */ 5629 lpfc_free_active_sgl(phba); 5630 lpfc_free_els_sgl_list(phba); 5631 5632 /* Free the completion queue EQ event pool */ 5633 lpfc_sli4_cq_event_release_all(phba); 5634 lpfc_sli4_cq_event_pool_destroy(phba); 5635 5636 /* Release resource identifiers. */ 5637 lpfc_sli4_dealloc_resource_identifiers(phba); 5638 5639 /* Free the bsmbx region. */ 5640 lpfc_destroy_bootstrap_mbox(phba); 5641 5642 /* Free the SLI Layer memory with SLI4 HBAs */ 5643 lpfc_mem_free_all(phba); 5644 5645 /* Free the current connect table */ 5646 list_for_each_entry_safe(conn_entry, next_conn_entry, 5647 &phba->fcf_conn_rec_list, list) { 5648 list_del_init(&conn_entry->list); 5649 kfree(conn_entry); 5650 } 5651 5652 return; 5653 } 5654 5655 /** 5656 * lpfc_init_api_table_setup - Set up init api function jump table 5657 * @phba: The hba struct for which this call is being executed. 5658 * @dev_grp: The HBA PCI-Device group number. 5659 * 5660 * This routine sets up the device INIT interface API function jump table 5661 * in @phba struct. 5662 * 5663 * Returns: 0 - success, -ENODEV - failure. 
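 *
 * For illustration only (a hedged sketch, not part of the formal interface
 * description): once this table is filled in, the rest of the driver is
 * expected to dispatch through the per-device-group pointers rather than
 * call the _s3/_s4 variants directly, e.g. a caller would do
 *
 *	if (lpfc_init_api_table_setup(phba, dev_grp))
 *		return -ENODEV;
 *	phba->lpfc_stop_port(phba);
 *
 * where lpfc_stop_port resolves to lpfc_stop_port_s3 or lpfc_stop_port_s4
 * depending on dev_grp.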
5664 **/ 5665 int 5666 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 5667 { 5668 phba->lpfc_hba_init_link = lpfc_hba_init_link; 5669 phba->lpfc_hba_down_link = lpfc_hba_down_link; 5670 phba->lpfc_selective_reset = lpfc_selective_reset; 5671 switch (dev_grp) { 5672 case LPFC_PCI_DEV_LP: 5673 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; 5674 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; 5675 phba->lpfc_stop_port = lpfc_stop_port_s3; 5676 break; 5677 case LPFC_PCI_DEV_OC: 5678 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; 5679 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; 5680 phba->lpfc_stop_port = lpfc_stop_port_s4; 5681 break; 5682 default: 5683 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5684 "1431 Invalid HBA PCI-device group: 0x%x\n", 5685 dev_grp); 5686 return -ENODEV; 5687 break; 5688 } 5689 return 0; 5690 } 5691 5692 /** 5693 * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources. 5694 * @phba: pointer to lpfc hba data structure. 5695 * 5696 * This routine is invoked to set up the driver internal resources before the 5697 * device specific resource setup to support the HBA device it attached to. 5698 * 5699 * Return codes 5700 * 0 - successful 5701 * other values - error 5702 **/ 5703 static int 5704 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) 5705 { 5706 /* 5707 * Driver resources common to all SLI revisions 5708 */ 5709 atomic_set(&phba->fast_event_count, 0); 5710 spin_lock_init(&phba->hbalock); 5711 5712 /* Initialize ndlp management spinlock */ 5713 spin_lock_init(&phba->ndlp_lock); 5714 5715 INIT_LIST_HEAD(&phba->port_list); 5716 INIT_LIST_HEAD(&phba->work_list); 5717 init_waitqueue_head(&phba->wait_4_mlo_m_q); 5718 5719 /* Initialize the wait queue head for the kernel thread */ 5720 init_waitqueue_head(&phba->work_waitq); 5721 5722 /* Initialize the scsi buffer list used by driver for scsi IO */ 5723 spin_lock_init(&phba->scsi_buf_list_get_lock); 5724 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); 5725 spin_lock_init(&phba->scsi_buf_list_put_lock); 5726 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); 5727 5728 /* Initialize the fabric iocb list */ 5729 INIT_LIST_HEAD(&phba->fabric_iocb_list); 5730 5731 /* Initialize list to save ELS buffers */ 5732 INIT_LIST_HEAD(&phba->elsbuf); 5733 5734 /* Initialize FCF connection rec list */ 5735 INIT_LIST_HEAD(&phba->fcf_conn_rec_list); 5736 5737 /* Initialize OAS configuration list */ 5738 spin_lock_init(&phba->devicelock); 5739 INIT_LIST_HEAD(&phba->luns); 5740 5741 return 0; 5742 } 5743 5744 /** 5745 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. 5746 * @phba: pointer to lpfc hba data structure. 5747 * 5748 * This routine is invoked to set up the driver internal resources after the 5749 * device specific resource setup to support the HBA device it attached to. 5750 * 5751 * Return codes 5752 * 0 - successful 5753 * other values - error 5754 **/ 5755 static int 5756 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) 5757 { 5758 int error; 5759 5760 /* Startup the kernel thread for this host adapter. */ 5761 phba->worker_thread = kthread_run(lpfc_do_work, phba, 5762 "lpfc_worker_%d", phba->brd_no); 5763 if (IS_ERR(phba->worker_thread)) { 5764 error = PTR_ERR(phba->worker_thread); 5765 return error; 5766 } 5767 5768 return 0; 5769 } 5770 5771 /** 5772 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. 5773 * @phba: pointer to lpfc hba data structure. 
5774 * 5775 * This routine is invoked to unset the driver internal resources set up after 5776 * the device specific resource setup for supporting the HBA device it 5777 * attached to. 5778 **/ 5779 static void 5780 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) 5781 { 5782 /* Stop kernel worker thread */ 5783 kthread_stop(phba->worker_thread); 5784 } 5785 5786 /** 5787 * lpfc_free_iocb_list - Free iocb list. 5788 * @phba: pointer to lpfc hba data structure. 5789 * 5790 * This routine is invoked to free the driver's IOCB list and memory. 5791 **/ 5792 static void 5793 lpfc_free_iocb_list(struct lpfc_hba *phba) 5794 { 5795 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 5796 5797 spin_lock_irq(&phba->hbalock); 5798 list_for_each_entry_safe(iocbq_entry, iocbq_next, 5799 &phba->lpfc_iocb_list, list) { 5800 list_del(&iocbq_entry->list); 5801 kfree(iocbq_entry); 5802 phba->total_iocbq_bufs--; 5803 } 5804 spin_unlock_irq(&phba->hbalock); 5805 5806 return; 5807 } 5808 5809 /** 5810 * lpfc_init_iocb_list - Allocate and initialize iocb list. 5811 * @phba: pointer to lpfc hba data structure. 5812 * 5813 * This routine is invoked to allocate and initizlize the driver's IOCB 5814 * list and set up the IOCB tag array accordingly. 5815 * 5816 * Return codes 5817 * 0 - successful 5818 * other values - error 5819 **/ 5820 static int 5821 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) 5822 { 5823 struct lpfc_iocbq *iocbq_entry = NULL; 5824 uint16_t iotag; 5825 int i; 5826 5827 /* Initialize and populate the iocb list per host. */ 5828 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 5829 for (i = 0; i < iocb_count; i++) { 5830 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); 5831 if (iocbq_entry == NULL) { 5832 printk(KERN_ERR "%s: only allocated %d iocbs of " 5833 "expected %d count. Unloading driver.\n", 5834 __func__, i, LPFC_IOCB_LIST_CNT); 5835 goto out_free_iocbq; 5836 } 5837 5838 iotag = lpfc_sli_next_iotag(phba, iocbq_entry); 5839 if (iotag == 0) { 5840 kfree(iocbq_entry); 5841 printk(KERN_ERR "%s: failed to allocate IOTAG. " 5842 "Unloading driver.\n", __func__); 5843 goto out_free_iocbq; 5844 } 5845 iocbq_entry->sli4_lxritag = NO_XRI; 5846 iocbq_entry->sli4_xritag = NO_XRI; 5847 5848 spin_lock_irq(&phba->hbalock); 5849 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 5850 phba->total_iocbq_bufs++; 5851 spin_unlock_irq(&phba->hbalock); 5852 } 5853 5854 return 0; 5855 5856 out_free_iocbq: 5857 lpfc_free_iocb_list(phba); 5858 5859 return -ENOMEM; 5860 } 5861 5862 /** 5863 * lpfc_free_sgl_list - Free a given sgl list. 5864 * @phba: pointer to lpfc hba data structure. 5865 * @sglq_list: pointer to the head of sgl list. 5866 * 5867 * This routine is invoked to free a give sgl list and memory. 5868 **/ 5869 void 5870 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list) 5871 { 5872 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 5873 5874 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) { 5875 list_del(&sglq_entry->list); 5876 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); 5877 kfree(sglq_entry); 5878 } 5879 } 5880 5881 /** 5882 * lpfc_free_els_sgl_list - Free els sgl list. 5883 * @phba: pointer to lpfc hba data structure. 5884 * 5885 * This routine is invoked to free the driver's els sgl list and memory. 
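 *
 * The body below follows the usual splice-then-free idiom: the sgl entries
 * are moved onto a local list while the hbalock and the ELS ring lock are
 * held, and the per-entry frees are done with no locks held. A minimal
 * sketch of the same pattern (some_lock, shared_list and free_one_entry
 * are placeholder names, not driver symbols):
 *
 *	LIST_HEAD(tmp);
 *
 *	spin_lock_irq(&some_lock);
 *	list_splice_init(&shared_list, &tmp);
 *	spin_unlock_irq(&some_lock);
 *
 *	list_for_each_entry_safe(entry, next, &tmp, list)
 *		free_one_entry(entry);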
5886 **/ 5887 static void 5888 lpfc_free_els_sgl_list(struct lpfc_hba *phba) 5889 { 5890 LIST_HEAD(sglq_list); 5891 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 5892 5893 /* Retrieve all els sgls from driver list */ 5894 spin_lock_irq(&phba->hbalock); 5895 spin_lock(&pring->ring_lock); 5896 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list); 5897 spin_unlock(&pring->ring_lock); 5898 spin_unlock_irq(&phba->hbalock); 5899 5900 /* Now free the sgl list */ 5901 lpfc_free_sgl_list(phba, &sglq_list); 5902 } 5903 5904 /** 5905 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. 5906 * @phba: pointer to lpfc hba data structure. 5907 * 5908 * This routine is invoked to allocate the driver's active sgl memory. 5909 * This array will hold the sglq_entry's for active IOs. 5910 **/ 5911 static int 5912 lpfc_init_active_sgl_array(struct lpfc_hba *phba) 5913 { 5914 int size; 5915 size = sizeof(struct lpfc_sglq *); 5916 size *= phba->sli4_hba.max_cfg_param.max_xri; 5917 5918 phba->sli4_hba.lpfc_sglq_active_list = 5919 kzalloc(size, GFP_KERNEL); 5920 if (!phba->sli4_hba.lpfc_sglq_active_list) 5921 return -ENOMEM; 5922 return 0; 5923 } 5924 5925 /** 5926 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. 5927 * @phba: pointer to lpfc hba data structure. 5928 * 5929 * This routine is invoked to walk through the array of active sglq entries 5930 * and free all of the resources. 5931 * This is just a place holder for now. 5932 **/ 5933 static void 5934 lpfc_free_active_sgl(struct lpfc_hba *phba) 5935 { 5936 kfree(phba->sli4_hba.lpfc_sglq_active_list); 5937 } 5938 5939 /** 5940 * lpfc_init_sgl_list - Allocate and initialize sgl list. 5941 * @phba: pointer to lpfc hba data structure. 5942 * 5943 * This routine is invoked to allocate and initizlize the driver's sgl 5944 * list and set up the sgl xritag tag array accordingly. 5945 * 5946 **/ 5947 static void 5948 lpfc_init_sgl_list(struct lpfc_hba *phba) 5949 { 5950 /* Initialize and populate the sglq list per host/VF. */ 5951 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list); 5952 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); 5953 5954 /* els xri-sgl book keeping */ 5955 phba->sli4_hba.els_xri_cnt = 0; 5956 5957 /* scsi xri-buffer book keeping */ 5958 phba->sli4_hba.scsi_xri_cnt = 0; 5959 } 5960 5961 /** 5962 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port 5963 * @phba: pointer to lpfc hba data structure. 5964 * 5965 * This routine is invoked to post rpi header templates to the 5966 * port for those SLI4 ports that do not support extents. This routine 5967 * posts a PAGE_SIZE memory region to the port to hold up to 5968 * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine 5969 * and should be called only when interrupts are disabled. 5970 * 5971 * Return codes 5972 * 0 - successful 5973 * -ERROR - otherwise. 
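 *
 * Rough sizing behind this scheme (an illustration based on the comments
 * in this file, not a spec statement): each posted region covers up to
 * LPFC_RPI_HDR_COUNT (64) rpi context entries, so a port reporting
 * max_rpi rpis needs on the order of
 *
 *	DIV_ROUND_UP(max_rpi, LPFC_RPI_HDR_COUNT)
 *
 * such regions over its lifetime, allocated one at a time through
 * lpfc_sli4_create_rpi_hdr().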
5974 **/ 5975 int 5976 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) 5977 { 5978 int rc = 0; 5979 struct lpfc_rpi_hdr *rpi_hdr; 5980 5981 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); 5982 if (!phba->sli4_hba.rpi_hdrs_in_use) 5983 return rc; 5984 if (phba->sli4_hba.extents_in_use) 5985 return -EIO; 5986 5987 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 5988 if (!rpi_hdr) { 5989 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5990 "0391 Error during rpi post operation\n"); 5991 lpfc_sli4_remove_rpis(phba); 5992 rc = -ENODEV; 5993 } 5994 5995 return rc; 5996 } 5997 5998 /** 5999 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region 6000 * @phba: pointer to lpfc hba data structure. 6001 * 6002 * This routine is invoked to allocate a single 4KB memory region to 6003 * support rpis and stores them in the phba. This single region 6004 * provides support for up to 64 rpis. The region is used globally 6005 * by the device. 6006 * 6007 * Returns: 6008 * A valid rpi hdr on success. 6009 * A NULL pointer on any failure. 6010 **/ 6011 struct lpfc_rpi_hdr * 6012 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) 6013 { 6014 uint16_t rpi_limit, curr_rpi_range; 6015 struct lpfc_dmabuf *dmabuf; 6016 struct lpfc_rpi_hdr *rpi_hdr; 6017 uint32_t rpi_count; 6018 6019 /* 6020 * If the SLI4 port supports extents, posting the rpi header isn't 6021 * required. Set the expected maximum count and let the actual value 6022 * get set when extents are fully allocated. 6023 */ 6024 if (!phba->sli4_hba.rpi_hdrs_in_use) 6025 return NULL; 6026 if (phba->sli4_hba.extents_in_use) 6027 return NULL; 6028 6029 /* The limit on the logical index is just the max_rpi count. */ 6030 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + 6031 phba->sli4_hba.max_cfg_param.max_rpi - 1; 6032 6033 spin_lock_irq(&phba->hbalock); 6034 /* 6035 * Establish the starting RPI in this header block. The starting 6036 * rpi is normalized to a zero base because the physical rpi is 6037 * port based. 6038 */ 6039 curr_rpi_range = phba->sli4_hba.next_rpi; 6040 spin_unlock_irq(&phba->hbalock); 6041 6042 /* 6043 * The port has a limited number of rpis. The increment here 6044 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value 6045 * and to allow the full max_rpi range per port. 6046 */ 6047 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit) 6048 rpi_count = rpi_limit - curr_rpi_range; 6049 else 6050 rpi_count = LPFC_RPI_HDR_COUNT; 6051 6052 if (!rpi_count) 6053 return NULL; 6054 /* 6055 * First allocate the protocol header region for the port. The 6056 * port expects a 4KB DMA-mapped memory region that is 4K aligned. 6057 */ 6058 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 6059 if (!dmabuf) 6060 return NULL; 6061 6062 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, 6063 LPFC_HDR_TEMPLATE_SIZE, 6064 &dmabuf->phys, GFP_KERNEL); 6065 if (!dmabuf->virt) { 6066 rpi_hdr = NULL; 6067 goto err_free_dmabuf; 6068 } 6069 6070 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 6071 rpi_hdr = NULL; 6072 goto err_free_coherent; 6073 } 6074 6075 /* Save the rpi header data for cleanup later. */ 6076 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); 6077 if (!rpi_hdr) 6078 goto err_free_coherent; 6079 6080 rpi_hdr->dmabuf = dmabuf; 6081 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; 6082 rpi_hdr->page_count = 1; 6083 spin_lock_irq(&phba->hbalock); 6084 6085 /* The rpi_hdr stores the logical index only. 
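	 * The physical rpi the port sees is presumably this logical value
	 * offset by the port's rpi_base (an inference from the rpi_base and
	 * max_rpi arithmetic above, not a documented guarantee).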
*/ 6086 rpi_hdr->start_rpi = curr_rpi_range; 6087 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); 6088 6089 /* 6090 * The next_rpi stores the next logical module-64 rpi value used 6091 * to post physical rpis in subsequent rpi postings. 6092 */ 6093 phba->sli4_hba.next_rpi += rpi_count; 6094 spin_unlock_irq(&phba->hbalock); 6095 return rpi_hdr; 6096 6097 err_free_coherent: 6098 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, 6099 dmabuf->virt, dmabuf->phys); 6100 err_free_dmabuf: 6101 kfree(dmabuf); 6102 return NULL; 6103 } 6104 6105 /** 6106 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions 6107 * @phba: pointer to lpfc hba data structure. 6108 * 6109 * This routine is invoked to remove all memory resources allocated 6110 * to support rpis for SLI4 ports not supporting extents. This routine 6111 * presumes the caller has released all rpis consumed by fabric or port 6112 * logins and is prepared to have the header pages removed. 6113 **/ 6114 void 6115 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) 6116 { 6117 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; 6118 6119 if (!phba->sli4_hba.rpi_hdrs_in_use) 6120 goto exit; 6121 6122 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, 6123 &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 6124 list_del(&rpi_hdr->list); 6125 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, 6126 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); 6127 kfree(rpi_hdr->dmabuf); 6128 kfree(rpi_hdr); 6129 } 6130 exit: 6131 /* There are no rpis available to the port now. */ 6132 phba->sli4_hba.next_rpi = 0; 6133 } 6134 6135 /** 6136 * lpfc_hba_alloc - Allocate driver hba data structure for a device. 6137 * @pdev: pointer to pci device data structure. 6138 * 6139 * This routine is invoked to allocate the driver hba data structure for an 6140 * HBA device. If the allocation is successful, the phba reference to the 6141 * PCI device data structure is set. 6142 * 6143 * Return codes 6144 * pointer to @phba - successful 6145 * NULL - error 6146 **/ 6147 static struct lpfc_hba * 6148 lpfc_hba_alloc(struct pci_dev *pdev) 6149 { 6150 struct lpfc_hba *phba; 6151 6152 /* Allocate memory for HBA structure */ 6153 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); 6154 if (!phba) { 6155 dev_err(&pdev->dev, "failed to allocate hba struct\n"); 6156 return NULL; 6157 } 6158 6159 /* Set reference to PCI device in HBA structure */ 6160 phba->pcidev = pdev; 6161 6162 /* Assign an unused board number */ 6163 phba->brd_no = lpfc_get_instance(); 6164 if (phba->brd_no < 0) { 6165 kfree(phba); 6166 return NULL; 6167 } 6168 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL; 6169 6170 spin_lock_init(&phba->ct_ev_lock); 6171 INIT_LIST_HEAD(&phba->ct_ev_waiters); 6172 6173 return phba; 6174 } 6175 6176 /** 6177 * lpfc_hba_free - Free driver hba data structure with a device. 6178 * @phba: pointer to lpfc hba data structure. 6179 * 6180 * This routine is invoked to free the driver hba data structure with an 6181 * HBA device. 6182 **/ 6183 static void 6184 lpfc_hba_free(struct lpfc_hba *phba) 6185 { 6186 /* Release the driver assigned board number */ 6187 idr_remove(&lpfc_hba_index, phba->brd_no); 6188 6189 /* Free memory allocated with sli rings */ 6190 kfree(phba->sli.ring); 6191 phba->sli.ring = NULL; 6192 6193 kfree(phba); 6194 return; 6195 } 6196 6197 /** 6198 * lpfc_create_shost - Create hba physical port with associated scsi host. 6199 * @phba: pointer to lpfc hba data structure. 
6200 * 6201 * This routine is invoked to create HBA physical port and associate a SCSI 6202 * host with it. 6203 * 6204 * Return codes 6205 * 0 - successful 6206 * other values - error 6207 **/ 6208 static int 6209 lpfc_create_shost(struct lpfc_hba *phba) 6210 { 6211 struct lpfc_vport *vport; 6212 struct Scsi_Host *shost; 6213 6214 /* Initialize HBA FC structure */ 6215 phba->fc_edtov = FF_DEF_EDTOV; 6216 phba->fc_ratov = FF_DEF_RATOV; 6217 phba->fc_altov = FF_DEF_ALTOV; 6218 phba->fc_arbtov = FF_DEF_ARBTOV; 6219 6220 atomic_set(&phba->sdev_cnt, 0); 6221 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); 6222 if (!vport) 6223 return -ENODEV; 6224 6225 shost = lpfc_shost_from_vport(vport); 6226 phba->pport = vport; 6227 lpfc_debugfs_initialize(vport); 6228 /* Put reference to SCSI host to driver's device private data */ 6229 pci_set_drvdata(phba->pcidev, shost); 6230 6231 /* 6232 * At this point we are fully registered with PSA. In addition, 6233 * any initial discovery should be completed. 6234 */ 6235 vport->load_flag |= FC_ALLOW_FDMI; 6236 if (phba->cfg_enable_SmartSAN || 6237 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { 6238 6239 /* Setup appropriate attribute masks */ 6240 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; 6241 if (phba->cfg_enable_SmartSAN) 6242 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; 6243 else 6244 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; 6245 } 6246 return 0; 6247 } 6248 6249 /** 6250 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 6251 * @phba: pointer to lpfc hba data structure. 6252 * 6253 * This routine is invoked to destroy HBA physical port and the associated 6254 * SCSI host. 6255 **/ 6256 static void 6257 lpfc_destroy_shost(struct lpfc_hba *phba) 6258 { 6259 struct lpfc_vport *vport = phba->pport; 6260 6261 /* Destroy physical port that associated with the SCSI host */ 6262 destroy_port(vport); 6263 6264 return; 6265 } 6266 6267 /** 6268 * lpfc_setup_bg - Setup Block guard structures and debug areas. 6269 * @phba: pointer to lpfc hba data structure. 6270 * @shost: the shost to be used to detect Block guard settings. 6271 * 6272 * This routine sets up the local Block guard protocol settings for @shost. 6273 * This routine also allocates memory for debugging bg buffers. 
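 *
 * The mask handling below, restated for clarity (this mirrors the inline
 * code rather than documenting new behaviour): the lpfc_prot_mask and
 * lpfc_prot_guard module parameters are first reduced to the values this
 * driver supports, and a request for DIX TYPE1 alone is promoted to DIF
 * TYPE1 as well so that protection is end to end:
 *
 *	mask &= SHOST_DIF_TYPE1_PROTECTION | SHOST_DIX_TYPE0_PROTECTION |
 *		SHOST_DIX_TYPE1_PROTECTION;
 *	guard &= SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC;
 *	if (mask == SHOST_DIX_TYPE1_PROTECTION)
 *		mask |= SHOST_DIF_TYPE1_PROTECTION;
 *	scsi_host_set_prot(shost, mask);
 *	scsi_host_set_guard(shost, guard);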
6274 **/ 6275 static void 6276 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) 6277 { 6278 uint32_t old_mask; 6279 uint32_t old_guard; 6280 6281 int pagecnt = 10; 6282 if (lpfc_prot_mask && lpfc_prot_guard) { 6283 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6284 "1478 Registering BlockGuard with the " 6285 "SCSI layer\n"); 6286 6287 old_mask = lpfc_prot_mask; 6288 old_guard = lpfc_prot_guard; 6289 6290 /* Only allow supported values */ 6291 lpfc_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION | 6292 SHOST_DIX_TYPE0_PROTECTION | 6293 SHOST_DIX_TYPE1_PROTECTION); 6294 lpfc_prot_guard &= (SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC); 6295 6296 /* DIF Type 1 protection for profiles AST1/C1 is end to end */ 6297 if (lpfc_prot_mask == SHOST_DIX_TYPE1_PROTECTION) 6298 lpfc_prot_mask |= SHOST_DIF_TYPE1_PROTECTION; 6299 6300 if (lpfc_prot_mask && lpfc_prot_guard) { 6301 if ((old_mask != lpfc_prot_mask) || 6302 (old_guard != lpfc_prot_guard)) 6303 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6304 "1475 Registering BlockGuard with the " 6305 "SCSI layer: mask %d guard %d\n", 6306 lpfc_prot_mask, lpfc_prot_guard); 6307 6308 scsi_host_set_prot(shost, lpfc_prot_mask); 6309 scsi_host_set_guard(shost, lpfc_prot_guard); 6310 } else 6311 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6312 "1479 Not Registering BlockGuard with the SCSI " 6313 "layer, Bad protection parameters: %d %d\n", 6314 old_mask, old_guard); 6315 } 6316 6317 if (!_dump_buf_data) { 6318 while (pagecnt) { 6319 spin_lock_init(&_dump_buf_lock); 6320 _dump_buf_data = 6321 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 6322 if (_dump_buf_data) { 6323 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6324 "9043 BLKGRD: allocated %d pages for " 6325 "_dump_buf_data at 0x%p\n", 6326 (1 << pagecnt), _dump_buf_data); 6327 _dump_buf_data_order = pagecnt; 6328 memset(_dump_buf_data, 0, 6329 ((1 << PAGE_SHIFT) << pagecnt)); 6330 break; 6331 } else 6332 --pagecnt; 6333 } 6334 if (!_dump_buf_data_order) 6335 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6336 "9044 BLKGRD: ERROR unable to allocate " 6337 "memory for hexdump\n"); 6338 } else 6339 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6340 "9045 BLKGRD: already allocated _dump_buf_data=0x%p" 6341 "\n", _dump_buf_data); 6342 if (!_dump_buf_dif) { 6343 while (pagecnt) { 6344 _dump_buf_dif = 6345 (char *) __get_free_pages(GFP_KERNEL, pagecnt); 6346 if (_dump_buf_dif) { 6347 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6348 "9046 BLKGRD: allocated %d pages for " 6349 "_dump_buf_dif at 0x%p\n", 6350 (1 << pagecnt), _dump_buf_dif); 6351 _dump_buf_dif_order = pagecnt; 6352 memset(_dump_buf_dif, 0, 6353 ((1 << PAGE_SHIFT) << pagecnt)); 6354 break; 6355 } else 6356 --pagecnt; 6357 } 6358 if (!_dump_buf_dif_order) 6359 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6360 "9047 BLKGRD: ERROR unable to allocate " 6361 "memory for hexdump\n"); 6362 } else 6363 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 6364 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n", 6365 _dump_buf_dif); 6366 } 6367 6368 /** 6369 * lpfc_post_init_setup - Perform necessary device post initialization setup. 6370 * @phba: pointer to lpfc hba data structure. 6371 * 6372 * This routine is invoked to perform all the necessary post initialization 6373 * setup for the device. 
6374 **/ 6375 static void 6376 lpfc_post_init_setup(struct lpfc_hba *phba) 6377 { 6378 struct Scsi_Host *shost; 6379 struct lpfc_adapter_event_header adapter_event; 6380 6381 /* Get the default values for Model Name and Description */ 6382 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 6383 6384 /* 6385 * hba setup may have changed the hba_queue_depth so we need to 6386 * adjust the value of can_queue. 6387 */ 6388 shost = pci_get_drvdata(phba->pcidev); 6389 shost->can_queue = phba->cfg_hba_queue_depth - 10; 6390 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) 6391 lpfc_setup_bg(phba, shost); 6392 6393 lpfc_host_attrib_init(shost); 6394 6395 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 6396 spin_lock_irq(shost->host_lock); 6397 lpfc_poll_start_timer(phba); 6398 spin_unlock_irq(shost->host_lock); 6399 } 6400 6401 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6402 "0428 Perform SCSI scan\n"); 6403 /* Send board arrival event to upper layer */ 6404 adapter_event.event_type = FC_REG_ADAPTER_EVENT; 6405 adapter_event.subcategory = LPFC_EVENT_ARRIVAL; 6406 fc_host_post_vendor_event(shost, fc_get_event_number(), 6407 sizeof(adapter_event), 6408 (char *) &adapter_event, 6409 LPFC_NL_VENDOR_ID); 6410 return; 6411 } 6412 6413 /** 6414 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. 6415 * @phba: pointer to lpfc hba data structure. 6416 * 6417 * This routine is invoked to set up the PCI device memory space for device 6418 * with SLI-3 interface spec. 6419 * 6420 * Return codes 6421 * 0 - successful 6422 * other values - error 6423 **/ 6424 static int 6425 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) 6426 { 6427 struct pci_dev *pdev; 6428 unsigned long bar0map_len, bar2map_len; 6429 int i, hbq_count; 6430 void *ptr; 6431 int error = -ENODEV; 6432 6433 /* Obtain PCI device reference */ 6434 if (!phba->pcidev) 6435 return error; 6436 else 6437 pdev = phba->pcidev; 6438 6439 /* Set the device DMA mask size */ 6440 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 6441 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 6442 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 6443 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 6444 return error; 6445 } 6446 } 6447 6448 /* Get the bus address of Bar0 and Bar2 and the number of bytes 6449 * required by each mapping. 6450 */ 6451 phba->pci_bar0_map = pci_resource_start(pdev, 0); 6452 bar0map_len = pci_resource_len(pdev, 0); 6453 6454 phba->pci_bar2_map = pci_resource_start(pdev, 2); 6455 bar2map_len = pci_resource_len(pdev, 2); 6456 6457 /* Map HBA SLIM to a kernel virtual address. */ 6458 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); 6459 if (!phba->slim_memmap_p) { 6460 dev_printk(KERN_ERR, &pdev->dev, 6461 "ioremap failed for SLIM memory.\n"); 6462 goto out; 6463 } 6464 6465 /* Map HBA Control Registers to a kernel virtual address. 
*/ 6466 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); 6467 if (!phba->ctrl_regs_memmap_p) { 6468 dev_printk(KERN_ERR, &pdev->dev, 6469 "ioremap failed for HBA control registers.\n"); 6470 goto out_iounmap_slim; 6471 } 6472 6473 /* Allocate memory for SLI-2 structures */ 6474 phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, 6475 &phba->slim2p.phys, GFP_KERNEL); 6476 if (!phba->slim2p.virt) 6477 goto out_iounmap; 6478 6479 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 6480 phba->mbox_ext = (phba->slim2p.virt + 6481 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 6482 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); 6483 phba->IOCBs = (phba->slim2p.virt + 6484 offsetof(struct lpfc_sli2_slim, IOCBs)); 6485 6486 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, 6487 lpfc_sli_hbq_size(), 6488 &phba->hbqslimp.phys, 6489 GFP_KERNEL); 6490 if (!phba->hbqslimp.virt) 6491 goto out_free_slim; 6492 6493 hbq_count = lpfc_sli_hbq_count(); 6494 ptr = phba->hbqslimp.virt; 6495 for (i = 0; i < hbq_count; ++i) { 6496 phba->hbqs[i].hbq_virt = ptr; 6497 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); 6498 ptr += (lpfc_hbq_defs[i]->entry_count * 6499 sizeof(struct lpfc_hbq_entry)); 6500 } 6501 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; 6502 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; 6503 6504 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 6505 6506 INIT_LIST_HEAD(&phba->rb_pend_list); 6507 6508 phba->MBslimaddr = phba->slim_memmap_p; 6509 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 6510 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 6511 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; 6512 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; 6513 6514 return 0; 6515 6516 out_free_slim: 6517 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 6518 phba->slim2p.virt, phba->slim2p.phys); 6519 out_iounmap: 6520 iounmap(phba->ctrl_regs_memmap_p); 6521 out_iounmap_slim: 6522 iounmap(phba->slim_memmap_p); 6523 out: 6524 return error; 6525 } 6526 6527 /** 6528 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. 6529 * @phba: pointer to lpfc hba data structure. 6530 * 6531 * This routine is invoked to unset the PCI device memory space for device 6532 * with SLI-3 interface spec. 6533 **/ 6534 static void 6535 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) 6536 { 6537 struct pci_dev *pdev; 6538 6539 /* Obtain PCI device reference */ 6540 if (!phba->pcidev) 6541 return; 6542 else 6543 pdev = phba->pcidev; 6544 6545 /* Free coherent DMA memory allocated */ 6546 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 6547 phba->hbqslimp.virt, phba->hbqslimp.phys); 6548 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 6549 phba->slim2p.virt, phba->slim2p.phys); 6550 6551 /* I/O memory unmap */ 6552 iounmap(phba->ctrl_regs_memmap_p); 6553 iounmap(phba->slim_memmap_p); 6554 6555 return; 6556 } 6557 6558 /** 6559 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status 6560 * @phba: pointer to lpfc hba data structure. 6561 * 6562 * This routine is invoked to wait for SLI4 device Power On Self Test (POST) 6563 * done and check status. 6564 * 6565 * Return 0 if successful, otherwise -ENODEV. 
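 *
 * Timing note (derived from the wait loop below): the port semaphore
 * register is polled up to 3000 times with msleep(10) between reads, i.e.
 * 3000 * 10 ms = 30,000 ms, which is the roughly 30 second POST window
 * mentioned in the loop's comment.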
6566 **/ 6567 int 6568 lpfc_sli4_post_status_check(struct lpfc_hba *phba) 6569 { 6570 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg; 6571 struct lpfc_register reg_data; 6572 int i, port_error = 0; 6573 uint32_t if_type; 6574 6575 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg)); 6576 memset(&reg_data, 0, sizeof(reg_data)); 6577 if (!phba->sli4_hba.PSMPHRregaddr) 6578 return -ENODEV; 6579 6580 /* Wait up to 30 seconds for the SLI Port POST done and ready */ 6581 for (i = 0; i < 3000; i++) { 6582 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 6583 &portsmphr_reg.word0) || 6584 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) { 6585 /* Port has a fatal POST error, break out */ 6586 port_error = -ENODEV; 6587 break; 6588 } 6589 if (LPFC_POST_STAGE_PORT_READY == 6590 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)) 6591 break; 6592 msleep(10); 6593 } 6594 6595 /* 6596 * If there was a port error during POST, then don't proceed with 6597 * other register reads as the data may not be valid. Just exit. 6598 */ 6599 if (port_error) { 6600 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6601 "1408 Port Failed POST - portsmphr=0x%x, " 6602 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, " 6603 "scr2=x%x, hscratch=x%x, pstatus=x%x\n", 6604 portsmphr_reg.word0, 6605 bf_get(lpfc_port_smphr_perr, &portsmphr_reg), 6606 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg), 6607 bf_get(lpfc_port_smphr_nip, &portsmphr_reg), 6608 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg), 6609 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg), 6610 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg), 6611 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg), 6612 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)); 6613 } else { 6614 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6615 "2534 Device Info: SLIFamily=0x%x, " 6616 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, " 6617 "SLIHint_2=0x%x, FT=0x%x\n", 6618 bf_get(lpfc_sli_intf_sli_family, 6619 &phba->sli4_hba.sli_intf), 6620 bf_get(lpfc_sli_intf_slirev, 6621 &phba->sli4_hba.sli_intf), 6622 bf_get(lpfc_sli_intf_if_type, 6623 &phba->sli4_hba.sli_intf), 6624 bf_get(lpfc_sli_intf_sli_hint1, 6625 &phba->sli4_hba.sli_intf), 6626 bf_get(lpfc_sli_intf_sli_hint2, 6627 &phba->sli4_hba.sli_intf), 6628 bf_get(lpfc_sli_intf_func_type, 6629 &phba->sli4_hba.sli_intf)); 6630 /* 6631 * Check for other Port errors during the initialization 6632 * process. Fail the load if the port did not come up 6633 * correctly. 6634 */ 6635 if_type = bf_get(lpfc_sli_intf_if_type, 6636 &phba->sli4_hba.sli_intf); 6637 switch (if_type) { 6638 case LPFC_SLI_INTF_IF_TYPE_0: 6639 phba->sli4_hba.ue_mask_lo = 6640 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr); 6641 phba->sli4_hba.ue_mask_hi = 6642 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr); 6643 uerrlo_reg.word0 = 6644 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); 6645 uerrhi_reg.word0 = 6646 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); 6647 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || 6648 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { 6649 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6650 "1422 Unrecoverable Error " 6651 "Detected during POST " 6652 "uerr_lo_reg=0x%x, " 6653 "uerr_hi_reg=0x%x, " 6654 "ue_mask_lo_reg=0x%x, " 6655 "ue_mask_hi_reg=0x%x\n", 6656 uerrlo_reg.word0, 6657 uerrhi_reg.word0, 6658 phba->sli4_hba.ue_mask_lo, 6659 phba->sli4_hba.ue_mask_hi); 6660 port_error = -ENODEV; 6661 } 6662 break; 6663 case LPFC_SLI_INTF_IF_TYPE_2: 6664 /* Final checks. The port status should be clean. 
*/ 6665 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 6666 &reg_data.word0) || 6667 (bf_get(lpfc_sliport_status_err, &reg_data) && 6668 !bf_get(lpfc_sliport_status_rn, &reg_data))) { 6669 phba->work_status[0] = 6670 readl(phba->sli4_hba.u.if_type2. 6671 ERR1regaddr); 6672 phba->work_status[1] = 6673 readl(phba->sli4_hba.u.if_type2. 6674 ERR2regaddr); 6675 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6676 "2888 Unrecoverable port error " 6677 "following POST: port status reg " 6678 "0x%x, port_smphr reg 0x%x, " 6679 "error 1=0x%x, error 2=0x%x\n", 6680 reg_data.word0, 6681 portsmphr_reg.word0, 6682 phba->work_status[0], 6683 phba->work_status[1]); 6684 port_error = -ENODEV; 6685 } 6686 break; 6687 case LPFC_SLI_INTF_IF_TYPE_1: 6688 default: 6689 break; 6690 } 6691 } 6692 return port_error; 6693 } 6694 6695 /** 6696 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map. 6697 * @phba: pointer to lpfc hba data structure. 6698 * @if_type: The SLI4 interface type getting configured. 6699 * 6700 * This routine is invoked to set up SLI4 BAR0 PCI config space register 6701 * memory map. 6702 **/ 6703 static void 6704 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type) 6705 { 6706 switch (if_type) { 6707 case LPFC_SLI_INTF_IF_TYPE_0: 6708 phba->sli4_hba.u.if_type0.UERRLOregaddr = 6709 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO; 6710 phba->sli4_hba.u.if_type0.UERRHIregaddr = 6711 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI; 6712 phba->sli4_hba.u.if_type0.UEMASKLOregaddr = 6713 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO; 6714 phba->sli4_hba.u.if_type0.UEMASKHIregaddr = 6715 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI; 6716 phba->sli4_hba.SLIINTFregaddr = 6717 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 6718 break; 6719 case LPFC_SLI_INTF_IF_TYPE_2: 6720 phba->sli4_hba.u.if_type2.ERR1regaddr = 6721 phba->sli4_hba.conf_regs_memmap_p + 6722 LPFC_CTL_PORT_ER1_OFFSET; 6723 phba->sli4_hba.u.if_type2.ERR2regaddr = 6724 phba->sli4_hba.conf_regs_memmap_p + 6725 LPFC_CTL_PORT_ER2_OFFSET; 6726 phba->sli4_hba.u.if_type2.CTRLregaddr = 6727 phba->sli4_hba.conf_regs_memmap_p + 6728 LPFC_CTL_PORT_CTL_OFFSET; 6729 phba->sli4_hba.u.if_type2.STATUSregaddr = 6730 phba->sli4_hba.conf_regs_memmap_p + 6731 LPFC_CTL_PORT_STA_OFFSET; 6732 phba->sli4_hba.SLIINTFregaddr = 6733 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; 6734 phba->sli4_hba.PSMPHRregaddr = 6735 phba->sli4_hba.conf_regs_memmap_p + 6736 LPFC_CTL_PORT_SEM_OFFSET; 6737 phba->sli4_hba.RQDBregaddr = 6738 phba->sli4_hba.conf_regs_memmap_p + 6739 LPFC_ULP0_RQ_DOORBELL; 6740 phba->sli4_hba.WQDBregaddr = 6741 phba->sli4_hba.conf_regs_memmap_p + 6742 LPFC_ULP0_WQ_DOORBELL; 6743 phba->sli4_hba.EQCQDBregaddr = 6744 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL; 6745 phba->sli4_hba.MQDBregaddr = 6746 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL; 6747 phba->sli4_hba.BMBXregaddr = 6748 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; 6749 break; 6750 case LPFC_SLI_INTF_IF_TYPE_1: 6751 default: 6752 dev_printk(KERN_ERR, &phba->pcidev->dev, 6753 "FATAL - unsupported SLI4 interface type - %d\n", 6754 if_type); 6755 break; 6756 } 6757 } 6758 6759 /** 6760 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. 6761 * @phba: pointer to lpfc hba data structure. 6762 * 6763 * This routine is invoked to set up SLI4 BAR1 control status register (CSR) 6764 * memory map. 
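 *
 * (Layout assumed here, inferred from the bar0/bar1/bar2 helpers in this
 * file rather than stated by the hardware spec: on if_type 0 ports BAR0
 * carries the config space registers, BAR1 the control/status registers
 * mapped below, and BAR2 the per-VF doorbells set up by
 * lpfc_sli4_bar2_register_memmap().)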
6765 **/ 6766 static void 6767 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba) 6768 { 6769 phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 6770 LPFC_SLIPORT_IF0_SMPHR; 6771 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 6772 LPFC_HST_ISR0; 6773 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 6774 LPFC_HST_IMR0; 6775 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + 6776 LPFC_HST_ISCR0; 6777 } 6778 6779 /** 6780 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map. 6781 * @phba: pointer to lpfc hba data structure. 6782 * @vf: virtual function number 6783 * 6784 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map 6785 * based on the given virtual function number, @vf. 6786 * 6787 * Return 0 if successful, otherwise -ENODEV. 6788 **/ 6789 static int 6790 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) 6791 { 6792 if (vf > LPFC_VIR_FUNC_MAX) 6793 return -ENODEV; 6794 6795 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 6796 vf * LPFC_VFR_PAGE_SIZE + 6797 LPFC_ULP0_RQ_DOORBELL); 6798 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 6799 vf * LPFC_VFR_PAGE_SIZE + 6800 LPFC_ULP0_WQ_DOORBELL); 6801 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 6802 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL); 6803 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 6804 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL); 6805 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p + 6806 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX); 6807 return 0; 6808 } 6809 6810 /** 6811 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox 6812 * @phba: pointer to lpfc hba data structure. 6813 * 6814 * This routine is invoked to create the bootstrap mailbox 6815 * region consistent with the SLI-4 interface spec. This 6816 * routine allocates all memory necessary to communicate 6817 * mailbox commands to the port and sets up all alignment 6818 * needs. No locks are expected to be held when calling 6819 * this routine. 6820 * 6821 * Return codes 6822 * 0 - successful 6823 * -ENOMEM - could not allocate memory. 6824 **/ 6825 static int 6826 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) 6827 { 6828 uint32_t bmbx_size; 6829 struct lpfc_dmabuf *dmabuf; 6830 struct dma_address *dma_address; 6831 uint32_t pa_addr; 6832 uint64_t phys_addr; 6833 6834 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 6835 if (!dmabuf) 6836 return -ENOMEM; 6837 6838 /* 6839 * The bootstrap mailbox region is comprised of 2 parts 6840 * plus an alignment restriction of 16 bytes. 6841 */ 6842 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); 6843 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size, 6844 &dmabuf->phys, GFP_KERNEL); 6845 if (!dmabuf->virt) { 6846 kfree(dmabuf); 6847 return -ENOMEM; 6848 } 6849 6850 /* 6851 * Initialize the bootstrap mailbox pointers now so that the register 6852 * operations are simple later. The mailbox dma address is required 6853 * to be 16-byte aligned. Also align the virtual memory as each 6854 * mailbox is copied into the bmbx mailbox region before issuing the 6855 * command to the port. 
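	 * Illustration of the address split performed a few lines below
	 * (a sketch of the existing arithmetic, not additional behaviour):
	 *
	 *	addr_lo = ((aphys >>  4) & 0x3fffffff) << 2 | LPFC_BMBX_BIT1_ADDR_LO;
	 *	addr_hi = ((aphys >> 34) & 0x3fffffff) << 2 | LPFC_BMBX_BIT1_ADDR_HI;
	 *
	 * i.e. bits 4..33 of the aligned physical address form the low word
	 * and bits 34..63 form the high word, each shifted up by two with a
	 * flag bit marking whether the word holds the high or the low half.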
6856 */ 6857 phba->sli4_hba.bmbx.dmabuf = dmabuf; 6858 phba->sli4_hba.bmbx.bmbx_size = bmbx_size; 6859 6860 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt, 6861 LPFC_ALIGN_16_BYTE); 6862 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys, 6863 LPFC_ALIGN_16_BYTE); 6864 6865 /* 6866 * Set the high and low physical addresses now. The SLI4 alignment 6867 * requirement is 16 bytes and the mailbox is posted to the port 6868 * as two 30-bit addresses. The other data is a bit marking whether 6869 * the 30-bit address is the high or low address. 6870 * Upcast bmbx aphys to 64bits so shift instruction compiles 6871 * clean on 32 bit machines. 6872 */ 6873 dma_address = &phba->sli4_hba.bmbx.dma_address; 6874 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; 6875 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff); 6876 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | 6877 LPFC_BMBX_BIT1_ADDR_HI); 6878 6879 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); 6880 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | 6881 LPFC_BMBX_BIT1_ADDR_LO); 6882 return 0; 6883 } 6884 6885 /** 6886 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources 6887 * @phba: pointer to lpfc hba data structure. 6888 * 6889 * This routine is invoked to teardown the bootstrap mailbox 6890 * region and release all host resources. This routine requires 6891 * the caller to ensure all mailbox commands recovered, no 6892 * additional mailbox comands are sent, and interrupts are disabled 6893 * before calling this routine. 6894 * 6895 **/ 6896 static void 6897 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) 6898 { 6899 dma_free_coherent(&phba->pcidev->dev, 6900 phba->sli4_hba.bmbx.bmbx_size, 6901 phba->sli4_hba.bmbx.dmabuf->virt, 6902 phba->sli4_hba.bmbx.dmabuf->phys); 6903 6904 kfree(phba->sli4_hba.bmbx.dmabuf); 6905 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); 6906 } 6907 6908 /** 6909 * lpfc_sli4_read_config - Get the config parameters. 6910 * @phba: pointer to lpfc hba data structure. 6911 * 6912 * This routine is invoked to read the configuration parameters from the HBA. 6913 * The configuration parameters are used to set the base and maximum values 6914 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource 6915 * allocation for the port. 6916 * 6917 * Return codes 6918 * 0 - successful 6919 * -ENOMEM - No available memory 6920 * -EIO - The mailbox failed to complete successfully. 
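 *
 * Shape of the exchange (a condensed sketch of the code below, not a
 * separate interface):
 *
 *	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	lpfc_read_config(phba, pmb);
 *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
 *	if (rc == MBX_SUCCESS)
 *		max_xri = bf_get(lpfc_mbx_rd_conf_xri_count,
 *				 &pmb->u.mqe.un.rd_config);
 *	mempool_free(pmb, phba->mbox_mem_pool);
 *
 * with the remaining limits (vpi, rpi, vfi, fcfi and the eq/cq/wq/rq
 * counts) pulled from the same rd_config reply in the same way.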
6921 **/ 6922 int 6923 lpfc_sli4_read_config(struct lpfc_hba *phba) 6924 { 6925 LPFC_MBOXQ_t *pmb; 6926 struct lpfc_mbx_read_config *rd_config; 6927 union lpfc_sli4_cfg_shdr *shdr; 6928 uint32_t shdr_status, shdr_add_status; 6929 struct lpfc_mbx_get_func_cfg *get_func_cfg; 6930 struct lpfc_rsrc_desc_fcfcoe *desc; 6931 char *pdesc_0; 6932 int length, i, rc = 0, rc2; 6933 6934 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6935 if (!pmb) { 6936 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6937 "2011 Unable to allocate memory for issuing " 6938 "SLI_CONFIG_SPECIAL mailbox command\n"); 6939 return -ENOMEM; 6940 } 6941 6942 lpfc_read_config(phba, pmb); 6943 6944 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 6945 if (rc != MBX_SUCCESS) { 6946 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6947 "2012 Mailbox failed , mbxCmd x%x " 6948 "READ_CONFIG, mbxStatus x%x\n", 6949 bf_get(lpfc_mqe_command, &pmb->u.mqe), 6950 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 6951 rc = -EIO; 6952 } else { 6953 rd_config = &pmb->u.mqe.un.rd_config; 6954 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) { 6955 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 6956 phba->sli4_hba.lnk_info.lnk_tp = 6957 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config); 6958 phba->sli4_hba.lnk_info.lnk_no = 6959 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config); 6960 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 6961 "3081 lnk_type:%d, lnk_numb:%d\n", 6962 phba->sli4_hba.lnk_info.lnk_tp, 6963 phba->sli4_hba.lnk_info.lnk_no); 6964 } else 6965 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 6966 "3082 Mailbox (x%x) returned ldv:x0\n", 6967 bf_get(lpfc_mqe_command, &pmb->u.mqe)); 6968 phba->sli4_hba.extents_in_use = 6969 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config); 6970 phba->sli4_hba.max_cfg_param.max_xri = 6971 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); 6972 phba->sli4_hba.max_cfg_param.xri_base = 6973 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); 6974 phba->sli4_hba.max_cfg_param.max_vpi = 6975 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); 6976 phba->sli4_hba.max_cfg_param.vpi_base = 6977 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); 6978 phba->sli4_hba.max_cfg_param.max_rpi = 6979 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); 6980 phba->sli4_hba.max_cfg_param.rpi_base = 6981 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); 6982 phba->sli4_hba.max_cfg_param.max_vfi = 6983 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); 6984 phba->sli4_hba.max_cfg_param.vfi_base = 6985 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); 6986 phba->sli4_hba.max_cfg_param.max_fcfi = 6987 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); 6988 phba->sli4_hba.max_cfg_param.max_eq = 6989 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); 6990 phba->sli4_hba.max_cfg_param.max_rq = 6991 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); 6992 phba->sli4_hba.max_cfg_param.max_wq = 6993 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); 6994 phba->sli4_hba.max_cfg_param.max_cq = 6995 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); 6996 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); 6997 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; 6998 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 6999 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 7000 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? 7001 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 7002 phba->max_vports = phba->max_vpi; 7003 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 7004 "2003 cfg params Extents? 
%d " 7005 "XRI(B:%d M:%d), " 7006 "VPI(B:%d M:%d) " 7007 "VFI(B:%d M:%d) " 7008 "RPI(B:%d M:%d) " 7009 "FCFI(Count:%d)\n", 7010 phba->sli4_hba.extents_in_use, 7011 phba->sli4_hba.max_cfg_param.xri_base, 7012 phba->sli4_hba.max_cfg_param.max_xri, 7013 phba->sli4_hba.max_cfg_param.vpi_base, 7014 phba->sli4_hba.max_cfg_param.max_vpi, 7015 phba->sli4_hba.max_cfg_param.vfi_base, 7016 phba->sli4_hba.max_cfg_param.max_vfi, 7017 phba->sli4_hba.max_cfg_param.rpi_base, 7018 phba->sli4_hba.max_cfg_param.max_rpi, 7019 phba->sli4_hba.max_cfg_param.max_fcfi); 7020 } 7021 7022 if (rc) 7023 goto read_cfg_out; 7024 7025 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 7026 length = phba->sli4_hba.max_cfg_param.max_xri - 7027 lpfc_sli4_get_els_iocb_cnt(phba); 7028 if (phba->cfg_hba_queue_depth > length) { 7029 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7030 "3361 HBA queue depth changed from %d to %d\n", 7031 phba->cfg_hba_queue_depth, length); 7032 phba->cfg_hba_queue_depth = length; 7033 } 7034 7035 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 7036 LPFC_SLI_INTF_IF_TYPE_2) 7037 goto read_cfg_out; 7038 7039 /* get the pf# and vf# for SLI4 if_type 2 port */ 7040 length = (sizeof(struct lpfc_mbx_get_func_cfg) - 7041 sizeof(struct lpfc_sli4_cfg_mhdr)); 7042 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON, 7043 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, 7044 length, LPFC_SLI4_MBX_EMBED); 7045 7046 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 7047 shdr = (union lpfc_sli4_cfg_shdr *) 7048 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; 7049 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7050 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 7051 if (rc2 || shdr_status || shdr_add_status) { 7052 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7053 "3026 Mailbox failed , mbxCmd x%x " 7054 "GET_FUNCTION_CONFIG, mbxStatus x%x\n", 7055 bf_get(lpfc_mqe_command, &pmb->u.mqe), 7056 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 7057 goto read_cfg_out; 7058 } 7059 7060 /* search for fc_fcoe resrouce descriptor */ 7061 get_func_cfg = &pmb->u.mqe.un.get_func_cfg; 7062 7063 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0]; 7064 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0; 7065 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc); 7066 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD) 7067 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH; 7068 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH) 7069 goto read_cfg_out; 7070 7071 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) { 7072 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i); 7073 if (LPFC_RSRC_DESC_TYPE_FCFCOE == 7074 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) { 7075 phba->sli4_hba.iov.pf_number = 7076 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); 7077 phba->sli4_hba.iov.vf_number = 7078 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc); 7079 break; 7080 } 7081 } 7082 7083 if (i < LPFC_RSRC_DESC_MAX_NUM) 7084 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 7085 "3027 GET_FUNCTION_CONFIG: pf_number:%d, " 7086 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, 7087 phba->sli4_hba.iov.vf_number); 7088 else 7089 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7090 "3028 GET_FUNCTION_CONFIG: failed to find " 7091 "Resrouce Descriptor:x%x\n", 7092 LPFC_RSRC_DESC_TYPE_FCFCOE); 7093 7094 read_cfg_out: 7095 mempool_free(pmb, phba->mbox_mem_pool); 7096 return rc; 7097 } 7098 7099 /** 7100 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port. 7101 * @phba: pointer to lpfc hba data structure. 
7102 * 7103 * This routine is invoked to setup the port-side endian order when 7104 * the port if_type is 0. This routine has no function for other 7105 * if_types. 7106 * 7107 * Return codes 7108 * 0 - successful 7109 * -ENOMEM - No available memory 7110 * -EIO - The mailbox failed to complete successfully. 7111 **/ 7112 static int 7113 lpfc_setup_endian_order(struct lpfc_hba *phba) 7114 { 7115 LPFC_MBOXQ_t *mboxq; 7116 uint32_t if_type, rc = 0; 7117 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, 7118 HOST_ENDIAN_HIGH_WORD1}; 7119 7120 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 7121 switch (if_type) { 7122 case LPFC_SLI_INTF_IF_TYPE_0: 7123 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 7124 GFP_KERNEL); 7125 if (!mboxq) { 7126 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7127 "0492 Unable to allocate memory for " 7128 "issuing SLI_CONFIG_SPECIAL mailbox " 7129 "command\n"); 7130 return -ENOMEM; 7131 } 7132 7133 /* 7134 * The SLI4_CONFIG_SPECIAL mailbox command requires the first 7135 * two words to contain special data values and no other data. 7136 */ 7137 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); 7138 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); 7139 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7140 if (rc != MBX_SUCCESS) { 7141 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7142 "0493 SLI_CONFIG_SPECIAL mailbox " 7143 "failed with status x%x\n", 7144 rc); 7145 rc = -EIO; 7146 } 7147 mempool_free(mboxq, phba->mbox_mem_pool); 7148 break; 7149 case LPFC_SLI_INTF_IF_TYPE_2: 7150 case LPFC_SLI_INTF_IF_TYPE_1: 7151 default: 7152 break; 7153 } 7154 return rc; 7155 } 7156 7157 /** 7158 * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts 7159 * @phba: pointer to lpfc hba data structure. 7160 * 7161 * This routine is invoked to check the user settable queue counts for EQs and 7162 * CQs. after this routine is called the counts will be set to valid values that 7163 * adhere to the constraints of the system's interrupt vectors and the port's 7164 * queue resources. 7165 * 7166 * Return codes 7167 * 0 - successful 7168 * -ENOMEM - No available memory 7169 **/ 7170 static int 7171 lpfc_sli4_queue_verify(struct lpfc_hba *phba) 7172 { 7173 int cfg_fcp_io_channel; 7174 uint32_t cpu; 7175 uint32_t i = 0; 7176 int fof_vectors = phba->cfg_fof ? 
1 : 0;
7177 
7178 /*
7179 * Sanity check for configured queue parameters against the run-time
7180 * device parameters
7181 */
7182 
7183 /* Sanity check on HBA EQ parameters */
7184 cfg_fcp_io_channel = phba->cfg_fcp_io_channel;
7185 
7186 /* It doesn't make sense to have more io channels than online CPUs */
7187 for_each_present_cpu(cpu) {
7188 if (cpu_online(cpu))
7189 i++;
7190 }
7191 phba->sli4_hba.num_online_cpu = i;
7192 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
7193 phba->sli4_hba.curr_disp_cpu = 0;
7194 
7195 if (i < cfg_fcp_io_channel) {
7196 lpfc_printf_log(phba,
7197 KERN_ERR, LOG_INIT,
7198 "3188 Reducing IO channels to match number of "
7199 "online CPUs: from %d to %d\n",
7200 cfg_fcp_io_channel, i);
7201 cfg_fcp_io_channel = i;
7202 }
7203 
7204 if (cfg_fcp_io_channel + fof_vectors >
7205 phba->sli4_hba.max_cfg_param.max_eq) {
7206 if (phba->sli4_hba.max_cfg_param.max_eq <
7207 LPFC_FCP_IO_CHAN_MIN) {
7208 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7209 "2574 Not enough EQs (%d) from the "
7210 "pci function for supporting FCP "
7211 "EQs (%d)\n",
7212 phba->sli4_hba.max_cfg_param.max_eq,
7213 phba->cfg_fcp_io_channel);
7214 goto out_error;
7215 }
7216 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7217 "2575 Reducing IO channels to match number of "
7218 "available EQs: from %d to %d\n",
7219 cfg_fcp_io_channel,
7220 phba->sli4_hba.max_cfg_param.max_eq);
7221 cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq -
7222 fof_vectors;
7223 }
7224 
7225 /* The actual number of FCP event queues adopted */
7226 phba->cfg_fcp_io_channel = cfg_fcp_io_channel;
7227 
7228 /* Get EQ depth from module parameter, fake the default for now */
7229 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
7230 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
7231 
7232 /* Get CQ depth from module parameter, fake the default for now */
7233 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
7234 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
7235 
7236 return 0;
7237 out_error:
7238 return -ENOMEM;
7239 }
7240 
7241 /**
7242 * lpfc_sli4_queue_create - Create all the SLI4 queues
7243 * @phba: pointer to lpfc hba data structure.
7244 *
7245 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
7246 * operation. For each SLI4 queue type, the parameters such as queue entry
7247 * count (queue depth) shall be taken from the module parameter. For now,
7248 * we just use some constant number as a placeholder.
7249 *
7250 * Return codes
7251 * 0 - successful
7252 * -ENOMEM - No available memory
7253 * -EIO - The mailbox failed to complete successfully.
7254 **/
7255 int
7256 lpfc_sli4_queue_create(struct lpfc_hba *phba)
7257 {
7258 struct lpfc_queue *qdesc;
7259 int idx;
7260 
7261 /*
7262 * Create HBA Record arrays.
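* Each record array allocated below (hba_eq, fcp_cq, fcp_wq and the
* fcp_cq_map) holds one entry per configured FCP IO channel, which is why
* a zero cfg_fcp_io_channel is rejected with -ERANGE before anything is
* allocated.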
7263 */ 7264 if (!phba->cfg_fcp_io_channel) 7265 return -ERANGE; 7266 7267 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 7268 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; 7269 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; 7270 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; 7271 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; 7272 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; 7273 7274 phba->sli4_hba.hba_eq = kzalloc((sizeof(struct lpfc_queue *) * 7275 phba->cfg_fcp_io_channel), GFP_KERNEL); 7276 if (!phba->sli4_hba.hba_eq) { 7277 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7278 "2576 Failed allocate memory for " 7279 "fast-path EQ record array\n"); 7280 goto out_error; 7281 } 7282 7283 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * 7284 phba->cfg_fcp_io_channel), GFP_KERNEL); 7285 if (!phba->sli4_hba.fcp_cq) { 7286 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7287 "2577 Failed allocate memory for fast-path " 7288 "CQ record array\n"); 7289 goto out_error; 7290 } 7291 7292 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) * 7293 phba->cfg_fcp_io_channel), GFP_KERNEL); 7294 if (!phba->sli4_hba.fcp_wq) { 7295 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7296 "2578 Failed allocate memory for fast-path " 7297 "WQ record array\n"); 7298 goto out_error; 7299 } 7300 7301 /* 7302 * Since the first EQ can have multiple CQs associated with it, 7303 * this array is used to quickly see if we have a FCP fast-path 7304 * CQ match. 7305 */ 7306 phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) * 7307 phba->cfg_fcp_io_channel), GFP_KERNEL); 7308 if (!phba->sli4_hba.fcp_cq_map) { 7309 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7310 "2545 Failed allocate memory for fast-path " 7311 "CQ map\n"); 7312 goto out_error; 7313 } 7314 7315 /* 7316 * Create HBA Event Queues (EQs). The cfg_fcp_io_channel specifies 7317 * how many EQs to create. 
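* Each loop iteration below also allocates the fast-path FCP CQ and WQ
* that pair with that EQ, so the three per-channel arrays stay
* index-aligned.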
7318 */ 7319 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { 7320 7321 /* Create EQs */ 7322 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 7323 phba->sli4_hba.eq_ecount); 7324 if (!qdesc) { 7325 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7326 "0497 Failed allocate EQ (%d)\n", idx); 7327 goto out_error; 7328 } 7329 phba->sli4_hba.hba_eq[idx] = qdesc; 7330 7331 /* Create Fast Path FCP CQs */ 7332 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 7333 phba->sli4_hba.cq_ecount); 7334 if (!qdesc) { 7335 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7336 "0499 Failed allocate fast-path FCP " 7337 "CQ (%d)\n", idx); 7338 goto out_error; 7339 } 7340 phba->sli4_hba.fcp_cq[idx] = qdesc; 7341 7342 /* Create Fast Path FCP WQs */ 7343 if (phba->fcp_embed_io) { 7344 qdesc = lpfc_sli4_queue_alloc(phba, 7345 LPFC_WQE128_SIZE, 7346 LPFC_WQE128_DEF_COUNT); 7347 } else { 7348 qdesc = lpfc_sli4_queue_alloc(phba, 7349 phba->sli4_hba.wq_esize, 7350 phba->sli4_hba.wq_ecount); 7351 } 7352 if (!qdesc) { 7353 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7354 "0503 Failed allocate fast-path FCP " 7355 "WQ (%d)\n", idx); 7356 goto out_error; 7357 } 7358 phba->sli4_hba.fcp_wq[idx] = qdesc; 7359 } 7360 7361 7362 /* 7363 * Create Slow Path Completion Queues (CQs) 7364 */ 7365 7366 /* Create slow-path Mailbox Command Complete Queue */ 7367 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 7368 phba->sli4_hba.cq_ecount); 7369 if (!qdesc) { 7370 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7371 "0500 Failed allocate slow-path mailbox CQ\n"); 7372 goto out_error; 7373 } 7374 phba->sli4_hba.mbx_cq = qdesc; 7375 7376 /* Create slow-path ELS Complete Queue */ 7377 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 7378 phba->sli4_hba.cq_ecount); 7379 if (!qdesc) { 7380 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7381 "0501 Failed allocate slow-path ELS CQ\n"); 7382 goto out_error; 7383 } 7384 phba->sli4_hba.els_cq = qdesc; 7385 7386 7387 /* 7388 * Create Slow Path Work Queues (WQs) 7389 */ 7390 7391 /* Create Mailbox Command Queue */ 7392 7393 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize, 7394 phba->sli4_hba.mq_ecount); 7395 if (!qdesc) { 7396 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7397 "0505 Failed allocate slow-path MQ\n"); 7398 goto out_error; 7399 } 7400 phba->sli4_hba.mbx_wq = qdesc; 7401 7402 /* 7403 * Create ELS Work Queues 7404 */ 7405 7406 /* Create slow-path ELS Work Queue */ 7407 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 7408 phba->sli4_hba.wq_ecount); 7409 if (!qdesc) { 7410 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7411 "0504 Failed allocate slow-path ELS WQ\n"); 7412 goto out_error; 7413 } 7414 phba->sli4_hba.els_wq = qdesc; 7415 7416 /* 7417 * Create Receive Queue (RQ) 7418 */ 7419 7420 /* Create Receive Queue for header */ 7421 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 7422 phba->sli4_hba.rq_ecount); 7423 if (!qdesc) { 7424 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7425 "0506 Failed allocate receive HRQ\n"); 7426 goto out_error; 7427 } 7428 phba->sli4_hba.hdr_rq = qdesc; 7429 7430 /* Create Receive Queue for data */ 7431 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 7432 phba->sli4_hba.rq_ecount); 7433 if (!qdesc) { 7434 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7435 "0507 Failed allocate receive DRQ\n"); 7436 goto out_error; 7437 } 7438 phba->sli4_hba.dat_rq = qdesc; 7439 7440 /* Create the Queues needed for Flash Optimized Fabric operations */ 7441 if (phba->cfg_fof) 7442 
lpfc_fof_queue_create(phba); 7443 return 0; 7444 7445 out_error: 7446 lpfc_sli4_queue_destroy(phba); 7447 return -ENOMEM; 7448 } 7449 7450 /** 7451 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues 7452 * @phba: pointer to lpfc hba data structure. 7453 * 7454 * This routine is invoked to release all the SLI4 queues with the FCoE HBA 7455 * operation. 7456 * 7457 * Return codes 7458 * 0 - successful 7459 * -ENOMEM - No available memory 7460 * -EIO - The mailbox failed to complete successfully. 7461 **/ 7462 void 7463 lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 7464 { 7465 int idx; 7466 7467 if (phba->cfg_fof) 7468 lpfc_fof_queue_destroy(phba); 7469 7470 if (phba->sli4_hba.hba_eq != NULL) { 7471 /* Release HBA event queue */ 7472 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { 7473 if (phba->sli4_hba.hba_eq[idx] != NULL) { 7474 lpfc_sli4_queue_free( 7475 phba->sli4_hba.hba_eq[idx]); 7476 phba->sli4_hba.hba_eq[idx] = NULL; 7477 } 7478 } 7479 kfree(phba->sli4_hba.hba_eq); 7480 phba->sli4_hba.hba_eq = NULL; 7481 } 7482 7483 if (phba->sli4_hba.fcp_cq != NULL) { 7484 /* Release FCP completion queue */ 7485 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { 7486 if (phba->sli4_hba.fcp_cq[idx] != NULL) { 7487 lpfc_sli4_queue_free( 7488 phba->sli4_hba.fcp_cq[idx]); 7489 phba->sli4_hba.fcp_cq[idx] = NULL; 7490 } 7491 } 7492 kfree(phba->sli4_hba.fcp_cq); 7493 phba->sli4_hba.fcp_cq = NULL; 7494 } 7495 7496 if (phba->sli4_hba.fcp_wq != NULL) { 7497 /* Release FCP work queue */ 7498 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { 7499 if (phba->sli4_hba.fcp_wq[idx] != NULL) { 7500 lpfc_sli4_queue_free( 7501 phba->sli4_hba.fcp_wq[idx]); 7502 phba->sli4_hba.fcp_wq[idx] = NULL; 7503 } 7504 } 7505 kfree(phba->sli4_hba.fcp_wq); 7506 phba->sli4_hba.fcp_wq = NULL; 7507 } 7508 7509 /* Release FCP CQ mapping array */ 7510 if (phba->sli4_hba.fcp_cq_map != NULL) { 7511 kfree(phba->sli4_hba.fcp_cq_map); 7512 phba->sli4_hba.fcp_cq_map = NULL; 7513 } 7514 7515 /* Release mailbox command work queue */ 7516 if (phba->sli4_hba.mbx_wq != NULL) { 7517 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); 7518 phba->sli4_hba.mbx_wq = NULL; 7519 } 7520 7521 /* Release ELS work queue */ 7522 if (phba->sli4_hba.els_wq != NULL) { 7523 lpfc_sli4_queue_free(phba->sli4_hba.els_wq); 7524 phba->sli4_hba.els_wq = NULL; 7525 } 7526 7527 /* Release unsolicited receive queue */ 7528 if (phba->sli4_hba.hdr_rq != NULL) { 7529 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); 7530 phba->sli4_hba.hdr_rq = NULL; 7531 } 7532 if (phba->sli4_hba.dat_rq != NULL) { 7533 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); 7534 phba->sli4_hba.dat_rq = NULL; 7535 } 7536 7537 /* Release ELS complete queue */ 7538 if (phba->sli4_hba.els_cq != NULL) { 7539 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 7540 phba->sli4_hba.els_cq = NULL; 7541 } 7542 7543 /* Release mailbox command complete queue */ 7544 if (phba->sli4_hba.mbx_cq != NULL) { 7545 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); 7546 phba->sli4_hba.mbx_cq = NULL; 7547 } 7548 7549 return; 7550 } 7551 7552 /** 7553 * lpfc_sli4_queue_setup - Set up all the SLI4 queues 7554 * @phba: pointer to lpfc hba data structure. 7555 * 7556 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA 7557 * operation. 7558 * 7559 * Return codes 7560 * 0 - successful 7561 * -ENOMEM - No available memory 7562 * -EIO - The mailbox failed to complete successfully. 
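*
* Queues are brought up in dependency order: the HBA EQs first, then the
* fast-path FCP CQs and WQs bound to them, followed by the slow-path
* mailbox and ELS CQs, the mailbox MQ and ELS WQ, and finally the
* header/data receive queue pair.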
7563 **/ 7564 int 7565 lpfc_sli4_queue_setup(struct lpfc_hba *phba) 7566 { 7567 struct lpfc_sli *psli = &phba->sli; 7568 struct lpfc_sli_ring *pring; 7569 int rc = -ENOMEM; 7570 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 7571 int fcp_cq_index = 0; 7572 uint32_t shdr_status, shdr_add_status; 7573 union lpfc_sli4_cfg_shdr *shdr; 7574 LPFC_MBOXQ_t *mboxq; 7575 uint32_t length; 7576 7577 /* Check for dual-ULP support */ 7578 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7579 if (!mboxq) { 7580 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7581 "3249 Unable to allocate memory for " 7582 "QUERY_FW_CFG mailbox command\n"); 7583 return -ENOMEM; 7584 } 7585 length = (sizeof(struct lpfc_mbx_query_fw_config) - 7586 sizeof(struct lpfc_sli4_cfg_mhdr)); 7587 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 7588 LPFC_MBOX_OPCODE_QUERY_FW_CFG, 7589 length, LPFC_SLI4_MBX_EMBED); 7590 7591 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7592 7593 shdr = (union lpfc_sli4_cfg_shdr *) 7594 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 7595 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7596 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 7597 if (shdr_status || shdr_add_status || rc) { 7598 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7599 "3250 QUERY_FW_CFG mailbox failed with status " 7600 "x%x add_status x%x, mbx status x%x\n", 7601 shdr_status, shdr_add_status, rc); 7602 if (rc != MBX_TIMEOUT) 7603 mempool_free(mboxq, phba->mbox_mem_pool); 7604 rc = -ENXIO; 7605 goto out_error; 7606 } 7607 7608 phba->sli4_hba.fw_func_mode = 7609 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode; 7610 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode; 7611 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode; 7612 phba->sli4_hba.physical_port = 7613 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port; 7614 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7615 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, " 7616 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode, 7617 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode); 7618 7619 if (rc != MBX_TIMEOUT) 7620 mempool_free(mboxq, phba->mbox_mem_pool); 7621 7622 /* 7623 * Set up HBA Event Queues (EQs) 7624 */ 7625 7626 /* Set up HBA event queue */ 7627 if (phba->cfg_fcp_io_channel && !phba->sli4_hba.hba_eq) { 7628 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7629 "3147 Fast-path EQs not allocated\n"); 7630 rc = -ENOMEM; 7631 goto out_error; 7632 } 7633 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) { 7634 if (!phba->sli4_hba.hba_eq[fcp_eqidx]) { 7635 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7636 "0522 Fast-path EQ (%d) not " 7637 "allocated\n", fcp_eqidx); 7638 rc = -ENOMEM; 7639 goto out_destroy_hba_eq; 7640 } 7641 rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[fcp_eqidx], 7642 (phba->cfg_fcp_imax / phba->cfg_fcp_io_channel)); 7643 if (rc) { 7644 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7645 "0523 Failed setup of fast-path EQ " 7646 "(%d), rc = 0x%x\n", fcp_eqidx, 7647 (uint32_t)rc); 7648 goto out_destroy_hba_eq; 7649 } 7650 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7651 "2584 HBA EQ setup: " 7652 "queue[%d]-id=%d\n", fcp_eqidx, 7653 phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id); 7654 } 7655 7656 /* Set up fast-path FCP Response Complete Queue */ 7657 if (!phba->sli4_hba.fcp_cq) { 7658 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7659 "3148 Fast-path FCP CQ array not " 7660 "allocated\n"); 7661 rc = -ENOMEM; 7662 goto out_destroy_hba_eq; 7663 } 7664 7665 for (fcp_cqidx = 0; 
fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) { 7666 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { 7667 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7668 "0526 Fast-path FCP CQ (%d) not " 7669 "allocated\n", fcp_cqidx); 7670 rc = -ENOMEM; 7671 goto out_destroy_fcp_cq; 7672 } 7673 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], 7674 phba->sli4_hba.hba_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP); 7675 if (rc) { 7676 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7677 "0527 Failed setup of fast-path FCP " 7678 "CQ (%d), rc = 0x%x\n", fcp_cqidx, 7679 (uint32_t)rc); 7680 goto out_destroy_fcp_cq; 7681 } 7682 7683 /* Setup fcp_cq_map for fast lookup */ 7684 phba->sli4_hba.fcp_cq_map[fcp_cqidx] = 7685 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id; 7686 7687 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7688 "2588 FCP CQ setup: cq[%d]-id=%d, " 7689 "parent seq[%d]-id=%d\n", 7690 fcp_cqidx, 7691 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, 7692 fcp_cqidx, 7693 phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id); 7694 } 7695 7696 /* Set up fast-path FCP Work Queue */ 7697 if (!phba->sli4_hba.fcp_wq) { 7698 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7699 "3149 Fast-path FCP WQ array not " 7700 "allocated\n"); 7701 rc = -ENOMEM; 7702 goto out_destroy_fcp_cq; 7703 } 7704 7705 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) { 7706 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { 7707 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7708 "0534 Fast-path FCP WQ (%d) not " 7709 "allocated\n", fcp_wqidx); 7710 rc = -ENOMEM; 7711 goto out_destroy_fcp_wq; 7712 } 7713 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], 7714 phba->sli4_hba.fcp_cq[fcp_wqidx], 7715 LPFC_FCP); 7716 if (rc) { 7717 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7718 "0535 Failed setup of fast-path FCP " 7719 "WQ (%d), rc = 0x%x\n", fcp_wqidx, 7720 (uint32_t)rc); 7721 goto out_destroy_fcp_wq; 7722 } 7723 7724 /* Bind this WQ to the next FCP ring */ 7725 pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx]; 7726 pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx]; 7727 phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring; 7728 7729 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7730 "2591 FCP WQ setup: wq[%d]-id=%d, " 7731 "parent cq[%d]-id=%d\n", 7732 fcp_wqidx, 7733 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, 7734 fcp_cq_index, 7735 phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id); 7736 } 7737 /* 7738 * Set up Complete Queues (CQs) 7739 */ 7740 7741 /* Set up slow-path MBOX Complete Queue as the first CQ */ 7742 if (!phba->sli4_hba.mbx_cq) { 7743 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7744 "0528 Mailbox CQ not allocated\n"); 7745 rc = -ENOMEM; 7746 goto out_destroy_fcp_wq; 7747 } 7748 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, 7749 phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX); 7750 if (rc) { 7751 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7752 "0529 Failed setup of slow-path mailbox CQ: " 7753 "rc = 0x%x\n", (uint32_t)rc); 7754 goto out_destroy_fcp_wq; 7755 } 7756 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7757 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", 7758 phba->sli4_hba.mbx_cq->queue_id, 7759 phba->sli4_hba.hba_eq[0]->queue_id); 7760 7761 /* Set up slow-path ELS Complete Queue */ 7762 if (!phba->sli4_hba.els_cq) { 7763 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7764 "0530 ELS CQ not allocated\n"); 7765 rc = -ENOMEM; 7766 goto out_destroy_mbx_cq; 7767 } 7768 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, 7769 phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS); 7770 if (rc) { 7771 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7772 
"0531 Failed setup of slow-path ELS CQ: " 7773 "rc = 0x%x\n", (uint32_t)rc); 7774 goto out_destroy_mbx_cq; 7775 } 7776 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7777 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", 7778 phba->sli4_hba.els_cq->queue_id, 7779 phba->sli4_hba.hba_eq[0]->queue_id); 7780 7781 /* 7782 * Set up all the Work Queues (WQs) 7783 */ 7784 7785 /* Set up Mailbox Command Queue */ 7786 if (!phba->sli4_hba.mbx_wq) { 7787 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7788 "0538 Slow-path MQ not allocated\n"); 7789 rc = -ENOMEM; 7790 goto out_destroy_els_cq; 7791 } 7792 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, 7793 phba->sli4_hba.mbx_cq, LPFC_MBOX); 7794 if (rc) { 7795 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7796 "0539 Failed setup of slow-path MQ: " 7797 "rc = 0x%x\n", rc); 7798 goto out_destroy_els_cq; 7799 } 7800 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7801 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 7802 phba->sli4_hba.mbx_wq->queue_id, 7803 phba->sli4_hba.mbx_cq->queue_id); 7804 7805 /* Set up slow-path ELS Work Queue */ 7806 if (!phba->sli4_hba.els_wq) { 7807 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7808 "0536 Slow-path ELS WQ not allocated\n"); 7809 rc = -ENOMEM; 7810 goto out_destroy_mbx_wq; 7811 } 7812 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq, 7813 phba->sli4_hba.els_cq, LPFC_ELS); 7814 if (rc) { 7815 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7816 "0537 Failed setup of slow-path ELS WQ: " 7817 "rc = 0x%x\n", (uint32_t)rc); 7818 goto out_destroy_mbx_wq; 7819 } 7820 7821 /* Bind this WQ to the ELS ring */ 7822 pring = &psli->ring[LPFC_ELS_RING]; 7823 pring->sli.sli4.wqp = (void *)phba->sli4_hba.els_wq; 7824 phba->sli4_hba.els_cq->pring = pring; 7825 7826 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7827 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 7828 phba->sli4_hba.els_wq->queue_id, 7829 phba->sli4_hba.els_cq->queue_id); 7830 7831 /* 7832 * Create Receive Queue (RQ) 7833 */ 7834 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { 7835 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7836 "0540 Receive Queue not allocated\n"); 7837 rc = -ENOMEM; 7838 goto out_destroy_els_wq; 7839 } 7840 7841 lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ); 7842 lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ); 7843 7844 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 7845 phba->sli4_hba.els_cq, LPFC_USOL); 7846 if (rc) { 7847 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7848 "0541 Failed setup of Receive Queue: " 7849 "rc = 0x%x\n", (uint32_t)rc); 7850 goto out_destroy_fcp_wq; 7851 } 7852 7853 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7854 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 7855 "parent cq-id=%d\n", 7856 phba->sli4_hba.hdr_rq->queue_id, 7857 phba->sli4_hba.dat_rq->queue_id, 7858 phba->sli4_hba.els_cq->queue_id); 7859 7860 if (phba->cfg_fof) { 7861 rc = lpfc_fof_queue_setup(phba); 7862 if (rc) { 7863 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7864 "0549 Failed setup of FOF Queues: " 7865 "rc = 0x%x\n", rc); 7866 goto out_destroy_els_rq; 7867 } 7868 } 7869 7870 /* 7871 * Configure EQ delay multipier for interrupt coalescing using 7872 * MODIFY_EQ_DELAY for all EQs created, LPFC_MAX_EQ_DELAY at a time. 
7873 */ 7874 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; 7875 fcp_eqidx += LPFC_MAX_EQ_DELAY) 7876 lpfc_modify_fcp_eq_delay(phba, fcp_eqidx); 7877 return 0; 7878 7879 out_destroy_els_rq: 7880 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); 7881 out_destroy_els_wq: 7882 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 7883 out_destroy_mbx_wq: 7884 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 7885 out_destroy_els_cq: 7886 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 7887 out_destroy_mbx_cq: 7888 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 7889 out_destroy_fcp_wq: 7890 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) 7891 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); 7892 out_destroy_fcp_cq: 7893 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) 7894 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); 7895 out_destroy_hba_eq: 7896 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) 7897 lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_eqidx]); 7898 out_error: 7899 return rc; 7900 } 7901 7902 /** 7903 * lpfc_sli4_queue_unset - Unset all the SLI4 queues 7904 * @phba: pointer to lpfc hba data structure. 7905 * 7906 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA 7907 * operation. 7908 * 7909 * Return codes 7910 * 0 - successful 7911 * -ENOMEM - No available memory 7912 * -EIO - The mailbox failed to complete successfully. 7913 **/ 7914 void 7915 lpfc_sli4_queue_unset(struct lpfc_hba *phba) 7916 { 7917 int fcp_qidx; 7918 7919 /* Unset the queues created for Flash Optimized Fabric operations */ 7920 if (phba->cfg_fof) 7921 lpfc_fof_queue_destroy(phba); 7922 /* Unset mailbox command work queue */ 7923 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 7924 /* Unset ELS work queue */ 7925 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 7926 /* Unset unsolicited receive queue */ 7927 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); 7928 /* Unset FCP work queue */ 7929 if (phba->sli4_hba.fcp_wq) { 7930 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel; 7931 fcp_qidx++) 7932 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]); 7933 } 7934 /* Unset mailbox command complete queue */ 7935 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 7936 /* Unset ELS complete queue */ 7937 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 7938 /* Unset FCP response complete queue */ 7939 if (phba->sli4_hba.fcp_cq) { 7940 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel; 7941 fcp_qidx++) 7942 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); 7943 } 7944 /* Unset fast-path event queue */ 7945 if (phba->sli4_hba.hba_eq) { 7946 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel; 7947 fcp_qidx++) 7948 lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_qidx]); 7949 } 7950 } 7951 7952 /** 7953 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool 7954 * @phba: pointer to lpfc hba data structure. 7955 * 7956 * This routine is invoked to allocate and set up a pool of completion queue 7957 * events. The body of the completion queue event is a completion queue entry 7958 * CQE. For now, this pool is used for the interrupt service routine to queue 7959 * the following HBA completion queue events for the worker thread to process: 7960 * - Mailbox asynchronous events 7961 * - Receive queue completion unsolicited events 7962 * Later, this can be used for all the slow-path events. 
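*
* The pool is pre-populated at create time with 4 * cq_ecount kmalloc'ed
* lpfc_cq_event entries for the interrupt service routine to draw from.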
7963 * 7964 * Return codes 7965 * 0 - successful 7966 * -ENOMEM - No available memory 7967 **/ 7968 static int 7969 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) 7970 { 7971 struct lpfc_cq_event *cq_event; 7972 int i; 7973 7974 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { 7975 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); 7976 if (!cq_event) 7977 goto out_pool_create_fail; 7978 list_add_tail(&cq_event->list, 7979 &phba->sli4_hba.sp_cqe_event_pool); 7980 } 7981 return 0; 7982 7983 out_pool_create_fail: 7984 lpfc_sli4_cq_event_pool_destroy(phba); 7985 return -ENOMEM; 7986 } 7987 7988 /** 7989 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool 7990 * @phba: pointer to lpfc hba data structure. 7991 * 7992 * This routine is invoked to free the pool of completion queue events at 7993 * driver unload time. Note that, it is the responsibility of the driver 7994 * cleanup routine to free all the outstanding completion-queue events 7995 * allocated from this pool back into the pool before invoking this routine 7996 * to destroy the pool. 7997 **/ 7998 static void 7999 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) 8000 { 8001 struct lpfc_cq_event *cq_event, *next_cq_event; 8002 8003 list_for_each_entry_safe(cq_event, next_cq_event, 8004 &phba->sli4_hba.sp_cqe_event_pool, list) { 8005 list_del(&cq_event->list); 8006 kfree(cq_event); 8007 } 8008 } 8009 8010 /** 8011 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 8012 * @phba: pointer to lpfc hba data structure. 8013 * 8014 * This routine is the lock free version of the API invoked to allocate a 8015 * completion-queue event from the free pool. 8016 * 8017 * Return: Pointer to the newly allocated completion-queue event if successful 8018 * NULL otherwise. 8019 **/ 8020 struct lpfc_cq_event * 8021 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 8022 { 8023 struct lpfc_cq_event *cq_event = NULL; 8024 8025 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, 8026 struct lpfc_cq_event, list); 8027 return cq_event; 8028 } 8029 8030 /** 8031 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool 8032 * @phba: pointer to lpfc hba data structure. 8033 * 8034 * This routine is the lock version of the API invoked to allocate a 8035 * completion-queue event from the free pool. 8036 * 8037 * Return: Pointer to the newly allocated completion-queue event if successful 8038 * NULL otherwise. 8039 **/ 8040 struct lpfc_cq_event * 8041 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) 8042 { 8043 struct lpfc_cq_event *cq_event; 8044 unsigned long iflags; 8045 8046 spin_lock_irqsave(&phba->hbalock, iflags); 8047 cq_event = __lpfc_sli4_cq_event_alloc(phba); 8048 spin_unlock_irqrestore(&phba->hbalock, iflags); 8049 return cq_event; 8050 } 8051 8052 /** 8053 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 8054 * @phba: pointer to lpfc hba data structure. 8055 * @cq_event: pointer to the completion queue event to be freed. 8056 * 8057 * This routine is the lock free version of the API invoked to release a 8058 * completion-queue event back into the free pool. 8059 **/ 8060 void 8061 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba, 8062 struct lpfc_cq_event *cq_event) 8063 { 8064 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); 8065 } 8066 8067 /** 8068 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool 8069 * @phba: pointer to lpfc hba data structure. 
8070 * @cq_event: pointer to the completion queue event to be freed.
8071 *
8072 * This routine is the lock version of the API invoked to release a
8073 * completion-queue event back into the free pool.
8074 **/
8075 void
8076 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
8077 struct lpfc_cq_event *cq_event)
8078 {
8079 unsigned long iflags;
8080 spin_lock_irqsave(&phba->hbalock, iflags);
8081 __lpfc_sli4_cq_event_release(phba, cq_event);
8082 spin_unlock_irqrestore(&phba->hbalock, iflags);
8083 }
8084 
8085 /**
8086 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
8087 * @phba: pointer to lpfc hba data structure.
8088 *
8089 * This routine frees all the pending completion-queue events back into
8090 * the free pool for device reset.
8091 **/
8092 static void
8093 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
8094 {
8095 LIST_HEAD(cqelist);
8096 struct lpfc_cq_event *cqe;
8097 unsigned long iflags;
8098 
8099 /* Retrieve all the pending WCQEs from pending WCQE lists */
8100 spin_lock_irqsave(&phba->hbalock, iflags);
8101 /* Pending FCP XRI abort events */
8102 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
8103 &cqelist);
8104 /* Pending ELS XRI abort events */
8105 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
8106 &cqelist);
8107 /* Pending async events */
8108 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
8109 &cqelist);
8110 spin_unlock_irqrestore(&phba->hbalock, iflags);
8111 
8112 while (!list_empty(&cqelist)) {
8113 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
8114 lpfc_sli4_cq_event_release(phba, cqe);
8115 }
8116 }
8117 
8118 /**
8119 * lpfc_pci_function_reset - Reset PCI function.
8120 * @phba: pointer to lpfc hba data structure.
8121 *
8122 * This routine is invoked to request a PCI function reset. It will destroy
8123 * all resources assigned to the PCI function that originates this request.
8124 *
8125 * Return codes
8126 * 0 - successful
8127 * -ENOMEM - No available memory
8128 * -EIO - The mailbox failed to complete successfully.
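*
* For an if_type 0 port the reset is requested with the FUNCTION_RESET
* mailbox command; for an if_type 2 port it is driven by writing INIT_PORT
* to the control register and then polling the port status register for
* RDY for up to 30 seconds.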
8129 **/ 8130 int 8131 lpfc_pci_function_reset(struct lpfc_hba *phba) 8132 { 8133 LPFC_MBOXQ_t *mboxq; 8134 uint32_t rc = 0, if_type; 8135 uint32_t shdr_status, shdr_add_status; 8136 uint32_t rdy_chk; 8137 uint32_t port_reset = 0; 8138 union lpfc_sli4_cfg_shdr *shdr; 8139 struct lpfc_register reg_data; 8140 uint16_t devid; 8141 8142 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8143 switch (if_type) { 8144 case LPFC_SLI_INTF_IF_TYPE_0: 8145 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 8146 GFP_KERNEL); 8147 if (!mboxq) { 8148 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8149 "0494 Unable to allocate memory for " 8150 "issuing SLI_FUNCTION_RESET mailbox " 8151 "command\n"); 8152 return -ENOMEM; 8153 } 8154 8155 /* Setup PCI function reset mailbox-ioctl command */ 8156 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 8157 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, 8158 LPFC_SLI4_MBX_EMBED); 8159 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8160 shdr = (union lpfc_sli4_cfg_shdr *) 8161 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; 8162 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 8163 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 8164 &shdr->response); 8165 if (rc != MBX_TIMEOUT) 8166 mempool_free(mboxq, phba->mbox_mem_pool); 8167 if (shdr_status || shdr_add_status || rc) { 8168 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8169 "0495 SLI_FUNCTION_RESET mailbox " 8170 "failed with status x%x add_status x%x," 8171 " mbx status x%x\n", 8172 shdr_status, shdr_add_status, rc); 8173 rc = -ENXIO; 8174 } 8175 break; 8176 case LPFC_SLI_INTF_IF_TYPE_2: 8177 wait: 8178 /* 8179 * Poll the Port Status Register and wait for RDY for 8180 * up to 30 seconds. If the port doesn't respond, treat 8181 * it as an error. 8182 */ 8183 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) { 8184 if (lpfc_readl(phba->sli4_hba.u.if_type2. 8185 STATUSregaddr, ®_data.word0)) { 8186 rc = -ENODEV; 8187 goto out; 8188 } 8189 if (bf_get(lpfc_sliport_status_rdy, ®_data)) 8190 break; 8191 msleep(20); 8192 } 8193 8194 if (!bf_get(lpfc_sliport_status_rdy, ®_data)) { 8195 phba->work_status[0] = readl( 8196 phba->sli4_hba.u.if_type2.ERR1regaddr); 8197 phba->work_status[1] = readl( 8198 phba->sli4_hba.u.if_type2.ERR2regaddr); 8199 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8200 "2890 Port not ready, port status reg " 8201 "0x%x error 1=0x%x, error 2=0x%x\n", 8202 reg_data.word0, 8203 phba->work_status[0], 8204 phba->work_status[1]); 8205 rc = -ENODEV; 8206 goto out; 8207 } 8208 8209 if (!port_reset) { 8210 /* 8211 * Reset the port now 8212 */ 8213 reg_data.word0 = 0; 8214 bf_set(lpfc_sliport_ctrl_end, ®_data, 8215 LPFC_SLIPORT_LITTLE_ENDIAN); 8216 bf_set(lpfc_sliport_ctrl_ip, ®_data, 8217 LPFC_SLIPORT_INIT_PORT); 8218 writel(reg_data.word0, phba->sli4_hba.u.if_type2. 8219 CTRLregaddr); 8220 /* flush */ 8221 pci_read_config_word(phba->pcidev, 8222 PCI_DEVICE_ID, &devid); 8223 8224 port_reset = 1; 8225 msleep(20); 8226 goto wait; 8227 } else if (bf_get(lpfc_sliport_status_rn, ®_data)) { 8228 rc = -ENODEV; 8229 goto out; 8230 } 8231 break; 8232 8233 case LPFC_SLI_INTF_IF_TYPE_1: 8234 default: 8235 break; 8236 } 8237 8238 out: 8239 /* Catch the not-ready port failure after a port reset. */ 8240 if (rc) { 8241 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8242 "3317 HBA not functional: IP Reset Failed " 8243 "try: echo fw_reset > board_mode\n"); 8244 rc = -ENODEV; 8245 } 8246 8247 return rc; 8248 } 8249 8250 /** 8251 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. 
8252 * @phba: pointer to lpfc hba data structure. 8253 * 8254 * This routine is invoked to set up the PCI device memory space for device 8255 * with SLI-4 interface spec. 8256 * 8257 * Return codes 8258 * 0 - successful 8259 * other values - error 8260 **/ 8261 static int 8262 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) 8263 { 8264 struct pci_dev *pdev; 8265 unsigned long bar0map_len, bar1map_len, bar2map_len; 8266 int error = -ENODEV; 8267 uint32_t if_type; 8268 8269 /* Obtain PCI device reference */ 8270 if (!phba->pcidev) 8271 return error; 8272 else 8273 pdev = phba->pcidev; 8274 8275 /* Set the device DMA mask size */ 8276 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 8277 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { 8278 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 8279 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { 8280 return error; 8281 } 8282 } 8283 8284 /* 8285 * The BARs and register set definitions and offset locations are 8286 * dependent on the if_type. 8287 */ 8288 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, 8289 &phba->sli4_hba.sli_intf.word0)) { 8290 return error; 8291 } 8292 8293 /* There is no SLI3 failback for SLI4 devices. */ 8294 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != 8295 LPFC_SLI_INTF_VALID) { 8296 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8297 "2894 SLI_INTF reg contents invalid " 8298 "sli_intf reg 0x%x\n", 8299 phba->sli4_hba.sli_intf.word0); 8300 return error; 8301 } 8302 8303 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8304 /* 8305 * Get the bus address of SLI4 device Bar regions and the 8306 * number of bytes required by each mapping. The mapping of the 8307 * particular PCI BARs regions is dependent on the type of 8308 * SLI4 device. 8309 */ 8310 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { 8311 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); 8312 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); 8313 8314 /* 8315 * Map SLI4 PCI Config Space Register base to a kernel virtual 8316 * addr 8317 */ 8318 phba->sli4_hba.conf_regs_memmap_p = 8319 ioremap(phba->pci_bar0_map, bar0map_len); 8320 if (!phba->sli4_hba.conf_regs_memmap_p) { 8321 dev_printk(KERN_ERR, &pdev->dev, 8322 "ioremap failed for SLI4 PCI config " 8323 "registers.\n"); 8324 goto out; 8325 } 8326 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; 8327 /* Set up BAR0 PCI config space register memory map */ 8328 lpfc_sli4_bar0_register_memmap(phba, if_type); 8329 } else { 8330 phba->pci_bar0_map = pci_resource_start(pdev, 1); 8331 bar0map_len = pci_resource_len(pdev, 1); 8332 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { 8333 dev_printk(KERN_ERR, &pdev->dev, 8334 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); 8335 goto out; 8336 } 8337 phba->sli4_hba.conf_regs_memmap_p = 8338 ioremap(phba->pci_bar0_map, bar0map_len); 8339 if (!phba->sli4_hba.conf_regs_memmap_p) { 8340 dev_printk(KERN_ERR, &pdev->dev, 8341 "ioremap failed for SLI4 PCI config " 8342 "registers.\n"); 8343 goto out; 8344 } 8345 lpfc_sli4_bar0_register_memmap(phba, if_type); 8346 } 8347 8348 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && 8349 (pci_resource_start(pdev, PCI_64BIT_BAR2))) { 8350 /* 8351 * Map SLI4 if type 0 HBA Control Register base to a kernel 8352 * virtual address and setup the registers. 
8353 */ 8354 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); 8355 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); 8356 phba->sli4_hba.ctrl_regs_memmap_p = 8357 ioremap(phba->pci_bar1_map, bar1map_len); 8358 if (!phba->sli4_hba.ctrl_regs_memmap_p) { 8359 dev_printk(KERN_ERR, &pdev->dev, 8360 "ioremap failed for SLI4 HBA control registers.\n"); 8361 goto out_iounmap_conf; 8362 } 8363 phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p; 8364 lpfc_sli4_bar1_register_memmap(phba); 8365 } 8366 8367 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && 8368 (pci_resource_start(pdev, PCI_64BIT_BAR4))) { 8369 /* 8370 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel 8371 * virtual address and setup the registers. 8372 */ 8373 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); 8374 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); 8375 phba->sli4_hba.drbl_regs_memmap_p = 8376 ioremap(phba->pci_bar2_map, bar2map_len); 8377 if (!phba->sli4_hba.drbl_regs_memmap_p) { 8378 dev_printk(KERN_ERR, &pdev->dev, 8379 "ioremap failed for SLI4 HBA doorbell registers.\n"); 8380 goto out_iounmap_ctrl; 8381 } 8382 phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; 8383 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); 8384 if (error) 8385 goto out_iounmap_all; 8386 } 8387 8388 return 0; 8389 8390 out_iounmap_all: 8391 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 8392 out_iounmap_ctrl: 8393 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 8394 out_iounmap_conf: 8395 iounmap(phba->sli4_hba.conf_regs_memmap_p); 8396 out: 8397 return error; 8398 } 8399 8400 /** 8401 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. 8402 * @phba: pointer to lpfc hba data structure. 8403 * 8404 * This routine is invoked to unset the PCI device memory space for device 8405 * with SLI-4 interface spec. 8406 **/ 8407 static void 8408 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) 8409 { 8410 uint32_t if_type; 8411 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8412 8413 switch (if_type) { 8414 case LPFC_SLI_INTF_IF_TYPE_0: 8415 iounmap(phba->sli4_hba.drbl_regs_memmap_p); 8416 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 8417 iounmap(phba->sli4_hba.conf_regs_memmap_p); 8418 break; 8419 case LPFC_SLI_INTF_IF_TYPE_2: 8420 iounmap(phba->sli4_hba.conf_regs_memmap_p); 8421 break; 8422 case LPFC_SLI_INTF_IF_TYPE_1: 8423 default: 8424 dev_printk(KERN_ERR, &phba->pcidev->dev, 8425 "FATAL - unsupported SLI4 interface type - %d\n", 8426 if_type); 8427 break; 8428 } 8429 } 8430 8431 /** 8432 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device 8433 * @phba: pointer to lpfc hba data structure. 8434 * 8435 * This routine is invoked to enable the MSI-X interrupt vectors to device 8436 * with SLI-3 interface specs. The kernel function pci_enable_msix_exact() 8437 * is called to enable the MSI-X vectors. Note that pci_enable_msix_exact(), 8438 * once invoked, enables either all or nothing, depending on the current 8439 * availability of PCI vector resources. The device driver is responsible 8440 * for calling the individual request_irq() to register each MSI-X vector 8441 * with a interrupt handler, which is done in this function. Note that 8442 * later when device is unloading, the driver should always call free_irq() 8443 * on all MSI-X vectors it has done request_irq() on before calling 8444 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device 8445 * will be left with MSI-X enabled and leaks its vectors. 
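*
* Vector 0 is registered with the slow-path interrupt handler and vector 1
* with the fast-path handler; the CONFIG_MSI mailbox command is then issued
* to map the HBA attention conditions onto those messages.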
8446 * 8447 * Return codes 8448 * 0 - successful 8449 * other values - error 8450 **/ 8451 static int 8452 lpfc_sli_enable_msix(struct lpfc_hba *phba) 8453 { 8454 int rc, i; 8455 LPFC_MBOXQ_t *pmb; 8456 8457 /* Set up MSI-X multi-message vectors */ 8458 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 8459 phba->msix_entries[i].entry = i; 8460 8461 /* Configure MSI-X capability structure */ 8462 rc = pci_enable_msix_exact(phba->pcidev, phba->msix_entries, 8463 LPFC_MSIX_VECTORS); 8464 if (rc) { 8465 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8466 "0420 PCI enable MSI-X failed (%d)\n", rc); 8467 goto vec_fail_out; 8468 } 8469 for (i = 0; i < LPFC_MSIX_VECTORS; i++) 8470 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8471 "0477 MSI-X entry[%d]: vector=x%x " 8472 "message=%d\n", i, 8473 phba->msix_entries[i].vector, 8474 phba->msix_entries[i].entry); 8475 /* 8476 * Assign MSI-X vectors to interrupt handlers 8477 */ 8478 8479 /* vector-0 is associated to slow-path handler */ 8480 rc = request_irq(phba->msix_entries[0].vector, 8481 &lpfc_sli_sp_intr_handler, 0, 8482 LPFC_SP_DRIVER_HANDLER_NAME, phba); 8483 if (rc) { 8484 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8485 "0421 MSI-X slow-path request_irq failed " 8486 "(%d)\n", rc); 8487 goto msi_fail_out; 8488 } 8489 8490 /* vector-1 is associated to fast-path handler */ 8491 rc = request_irq(phba->msix_entries[1].vector, 8492 &lpfc_sli_fp_intr_handler, 0, 8493 LPFC_FP_DRIVER_HANDLER_NAME, phba); 8494 8495 if (rc) { 8496 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8497 "0429 MSI-X fast-path request_irq failed " 8498 "(%d)\n", rc); 8499 goto irq_fail_out; 8500 } 8501 8502 /* 8503 * Configure HBA MSI-X attention conditions to messages 8504 */ 8505 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 8506 8507 if (!pmb) { 8508 rc = -ENOMEM; 8509 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8510 "0474 Unable to allocate memory for issuing " 8511 "MBOX_CONFIG_MSI command\n"); 8512 goto mem_fail_out; 8513 } 8514 rc = lpfc_config_msi(phba, pmb); 8515 if (rc) 8516 goto mbx_fail_out; 8517 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 8518 if (rc != MBX_SUCCESS) { 8519 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 8520 "0351 Config MSI mailbox command failed, " 8521 "mbxCmd x%x, mbxStatus x%x\n", 8522 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); 8523 goto mbx_fail_out; 8524 } 8525 8526 /* Free memory allocated for mailbox command */ 8527 mempool_free(pmb, phba->mbox_mem_pool); 8528 return rc; 8529 8530 mbx_fail_out: 8531 /* Free memory allocated for mailbox command */ 8532 mempool_free(pmb, phba->mbox_mem_pool); 8533 8534 mem_fail_out: 8535 /* free the irq already requested */ 8536 free_irq(phba->msix_entries[1].vector, phba); 8537 8538 irq_fail_out: 8539 /* free the irq already requested */ 8540 free_irq(phba->msix_entries[0].vector, phba); 8541 8542 msi_fail_out: 8543 /* Unconfigure MSI-X capability structure */ 8544 pci_disable_msix(phba->pcidev); 8545 8546 vec_fail_out: 8547 return rc; 8548 } 8549 8550 /** 8551 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device. 8552 * @phba: pointer to lpfc hba data structure. 8553 * 8554 * This routine is invoked to release the MSI-X vectors and then disable the 8555 * MSI-X interrupt mode to device with SLI-3 interface spec. 
8556 **/
8557 static void
8558 lpfc_sli_disable_msix(struct lpfc_hba *phba)
8559 {
8560 int i;
8561 
8562 /* Free up MSI-X multi-message vectors */
8563 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
8564 free_irq(phba->msix_entries[i].vector, phba);
8565 /* Disable MSI-X */
8566 pci_disable_msix(phba->pcidev);
8567 
8568 return;
8569 }
8570 
8571 /**
8572 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
8573 * @phba: pointer to lpfc hba data structure.
8574 *
8575 * This routine is invoked to enable the MSI interrupt mode to device with
8576 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
8577 * enable the MSI vector. The device driver is responsible for calling the
8578 * request_irq() to register the MSI vector with an interrupt handler, which
8579 * is done in this function.
8580 *
8581 * Return codes
8582 * 0 - successful
8583 * other values - error
8584 */
8585 static int
8586 lpfc_sli_enable_msi(struct lpfc_hba *phba)
8587 {
8588 int rc;
8589 
8590 rc = pci_enable_msi(phba->pcidev);
8591 if (!rc)
8592 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8593 "0462 PCI enable MSI mode success.\n");
8594 else {
8595 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8596 "0471 PCI enable MSI mode failed (%d)\n", rc);
8597 return rc;
8598 }
8599 
8600 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
8601 0, LPFC_DRIVER_NAME, phba);
8602 if (rc) {
8603 pci_disable_msi(phba->pcidev);
8604 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8605 "0478 MSI request_irq failed (%d)\n", rc);
8606 }
8607 return rc;
8608 }
8609 
8610 /**
8611 * lpfc_sli_disable_msi - Disable MSI interrupt mode on SLI-3 device.
8612 * @phba: pointer to lpfc hba data structure.
8613 *
8614 * This routine is invoked to disable the MSI interrupt mode to device with
8615 * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it has
8616 * done request_irq() on before calling pci_disable_msi(). Failure to do so
8617 * results in a BUG_ON(), and the device will be left with MSI enabled, leaking
8618 * its vector.
8619 */
8620 static void
8621 lpfc_sli_disable_msi(struct lpfc_hba *phba)
8622 {
8623 free_irq(phba->pcidev->irq, phba);
8624 pci_disable_msi(phba->pcidev);
8625 return;
8626 }
8627 
8628 /**
8629 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
8630 * @phba: pointer to lpfc hba data structure.
8631 *
8632 * This routine is invoked to enable device interrupts and associate the
8633 * driver's interrupt handler(s) with interrupt vector(s) for a device with
8634 * SLI-3 interface spec. Depending on the interrupt mode configured for the
8635 * driver, it will try to fall back from the configured interrupt mode to an
8636 * interrupt mode that is supported by the platform, kernel, and device, in
8637 * the order of:
8638 * MSI-X -> MSI -> IRQ.
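*
* cfg_mode selects the starting point: 2 tries MSI-X first (after a
* CONFIG_PORT for SLI rev 3), 1 starts at MSI, and 0 falls straight through
* to INTx; the mode actually enabled is returned (2, 1 or 0, or
* LPFC_INTR_ERROR if none could be set up).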
8639 * 8640 * Return codes 8641 * 0 - successful 8642 * other values - error 8643 **/ 8644 static uint32_t 8645 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 8646 { 8647 uint32_t intr_mode = LPFC_INTR_ERROR; 8648 int retval; 8649 8650 if (cfg_mode == 2) { 8651 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ 8652 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); 8653 if (!retval) { 8654 /* Now, try to enable MSI-X interrupt mode */ 8655 retval = lpfc_sli_enable_msix(phba); 8656 if (!retval) { 8657 /* Indicate initialization to MSI-X mode */ 8658 phba->intr_type = MSIX; 8659 intr_mode = 2; 8660 } 8661 } 8662 } 8663 8664 /* Fallback to MSI if MSI-X initialization failed */ 8665 if (cfg_mode >= 1 && phba->intr_type == NONE) { 8666 retval = lpfc_sli_enable_msi(phba); 8667 if (!retval) { 8668 /* Indicate initialization to MSI mode */ 8669 phba->intr_type = MSI; 8670 intr_mode = 1; 8671 } 8672 } 8673 8674 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 8675 if (phba->intr_type == NONE) { 8676 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, 8677 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 8678 if (!retval) { 8679 /* Indicate initialization to INTx mode */ 8680 phba->intr_type = INTx; 8681 intr_mode = 0; 8682 } 8683 } 8684 return intr_mode; 8685 } 8686 8687 /** 8688 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. 8689 * @phba: pointer to lpfc hba data structure. 8690 * 8691 * This routine is invoked to disable device interrupt and disassociate the 8692 * driver's interrupt handler(s) from interrupt vector(s) to device with 8693 * SLI-3 interface spec. Depending on the interrupt mode, the driver will 8694 * release the interrupt vector(s) for the message signaled interrupt. 8695 **/ 8696 static void 8697 lpfc_sli_disable_intr(struct lpfc_hba *phba) 8698 { 8699 /* Disable the currently initialized interrupt mode */ 8700 if (phba->intr_type == MSIX) 8701 lpfc_sli_disable_msix(phba); 8702 else if (phba->intr_type == MSI) 8703 lpfc_sli_disable_msi(phba); 8704 else if (phba->intr_type == INTx) 8705 free_irq(phba->pcidev->irq, phba); 8706 8707 /* Reset interrupt management states */ 8708 phba->intr_type = NONE; 8709 phba->sli.slistat.sli_intr = 0; 8710 8711 return; 8712 } 8713 8714 /** 8715 * lpfc_find_next_cpu - Find next available CPU that matches the phys_id 8716 * @phba: pointer to lpfc hba data structure. 8717 * 8718 * Find next available CPU to use for IRQ to CPU affinity. 8719 */ 8720 static int 8721 lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id) 8722 { 8723 struct lpfc_vector_map_info *cpup; 8724 int cpu; 8725 8726 cpup = phba->sli4_hba.cpu_map; 8727 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { 8728 /* CPU must be online */ 8729 if (cpu_online(cpu)) { 8730 if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) && 8731 (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) && 8732 (cpup->phys_id == phys_id)) { 8733 return cpu; 8734 } 8735 } 8736 cpup++; 8737 } 8738 8739 /* 8740 * If we get here, we have used ALL CPUs for the specific 8741 * phys_id. Now we need to clear out lpfc_used_cpu and start 8742 * reusing CPUs. 
8743 */ 8744 8745 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { 8746 if (lpfc_used_cpu[cpu] == phys_id) 8747 lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY; 8748 } 8749 8750 cpup = phba->sli4_hba.cpu_map; 8751 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { 8752 /* CPU must be online */ 8753 if (cpu_online(cpu)) { 8754 if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) && 8755 (cpup->phys_id == phys_id)) { 8756 return cpu; 8757 } 8758 } 8759 cpup++; 8760 } 8761 return LPFC_VECTOR_MAP_EMPTY; 8762 } 8763 8764 /** 8765 * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors 8766 * @phba: pointer to lpfc hba data structure. 8767 * @vectors: number of HBA vectors 8768 * 8769 * Affinitize MSIX IRQ vectors to CPUs. Try to equally spread vector 8770 * affinization across multple physical CPUs (numa nodes). 8771 * In addition, this routine will assign an IO channel for each CPU 8772 * to use when issuing I/Os. 8773 */ 8774 static int 8775 lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors) 8776 { 8777 int i, idx, saved_chann, used_chann, cpu, phys_id; 8778 int max_phys_id, min_phys_id; 8779 int num_io_channel, first_cpu, chan; 8780 struct lpfc_vector_map_info *cpup; 8781 #ifdef CONFIG_X86 8782 struct cpuinfo_x86 *cpuinfo; 8783 #endif 8784 uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1]; 8785 8786 /* If there is no mapping, just return */ 8787 if (!phba->cfg_fcp_cpu_map) 8788 return 1; 8789 8790 /* Init cpu_map array */ 8791 memset(phba->sli4_hba.cpu_map, 0xff, 8792 (sizeof(struct lpfc_vector_map_info) * 8793 phba->sli4_hba.num_present_cpu)); 8794 8795 max_phys_id = 0; 8796 min_phys_id = 0xff; 8797 phys_id = 0; 8798 num_io_channel = 0; 8799 first_cpu = LPFC_VECTOR_MAP_EMPTY; 8800 8801 /* Update CPU map with physical id and core id of each CPU */ 8802 cpup = phba->sli4_hba.cpu_map; 8803 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { 8804 #ifdef CONFIG_X86 8805 cpuinfo = &cpu_data(cpu); 8806 cpup->phys_id = cpuinfo->phys_proc_id; 8807 cpup->core_id = cpuinfo->cpu_core_id; 8808 #else 8809 /* No distinction between CPUs for other platforms */ 8810 cpup->phys_id = 0; 8811 cpup->core_id = 0; 8812 #endif 8813 8814 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8815 "3328 CPU physid %d coreid %d\n", 8816 cpup->phys_id, cpup->core_id); 8817 8818 if (cpup->phys_id > max_phys_id) 8819 max_phys_id = cpup->phys_id; 8820 if (cpup->phys_id < min_phys_id) 8821 min_phys_id = cpup->phys_id; 8822 cpup++; 8823 } 8824 8825 phys_id = min_phys_id; 8826 /* Now associate the HBA vectors with specific CPUs */ 8827 for (idx = 0; idx < vectors; idx++) { 8828 cpup = phba->sli4_hba.cpu_map; 8829 cpu = lpfc_find_next_cpu(phba, phys_id); 8830 if (cpu == LPFC_VECTOR_MAP_EMPTY) { 8831 8832 /* Try for all phys_id's */ 8833 for (i = 1; i < max_phys_id; i++) { 8834 phys_id++; 8835 if (phys_id > max_phys_id) 8836 phys_id = min_phys_id; 8837 cpu = lpfc_find_next_cpu(phba, phys_id); 8838 if (cpu == LPFC_VECTOR_MAP_EMPTY) 8839 continue; 8840 goto found; 8841 } 8842 8843 /* Use round robin for scheduling */ 8844 phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN; 8845 chan = 0; 8846 cpup = phba->sli4_hba.cpu_map; 8847 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { 8848 cpup->channel_id = chan; 8849 cpup++; 8850 chan++; 8851 if (chan >= phba->cfg_fcp_io_channel) 8852 chan = 0; 8853 } 8854 8855 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8856 "3329 Cannot set affinity:" 8857 "Error mapping vector %d (%d)\n", 8858 idx, vectors); 8859 return 0; 8860 } 8861 found: 8862 cpup += cpu; 8863 if (phba->cfg_fcp_cpu_map 
== LPFC_DRIVER_CPU_MAP) 8864 lpfc_used_cpu[cpu] = phys_id; 8865 8866 /* Associate vector with selected CPU */ 8867 cpup->irq = phba->sli4_hba.msix_entries[idx].vector; 8868 8869 /* Associate IO channel with selected CPU */ 8870 cpup->channel_id = idx; 8871 num_io_channel++; 8872 8873 if (first_cpu == LPFC_VECTOR_MAP_EMPTY) 8874 first_cpu = cpu; 8875 8876 /* Now affinitize to the selected CPU */ 8877 i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx]. 8878 vector, get_cpu_mask(cpu)); 8879 8880 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8881 "3330 Set Affinity: CPU %d channel %d " 8882 "irq %d (%x)\n", 8883 cpu, cpup->channel_id, 8884 phba->sli4_hba.msix_entries[idx].vector, i); 8885 8886 /* Spread vector mapping across multple physical CPU nodes */ 8887 phys_id++; 8888 if (phys_id > max_phys_id) 8889 phys_id = min_phys_id; 8890 } 8891 8892 /* 8893 * Finally fill in the IO channel for any remaining CPUs. 8894 * At this point, all IO channels have been assigned to a specific 8895 * MSIx vector, mapped to a specific CPU. 8896 * Base the remaining IO channel assigned, to IO channels already 8897 * assigned to other CPUs on the same phys_id. 8898 */ 8899 for (i = min_phys_id; i <= max_phys_id; i++) { 8900 /* 8901 * If there are no io channels already mapped to 8902 * this phys_id, just round robin thru the io_channels. 8903 * Setup chann[] for round robin. 8904 */ 8905 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) 8906 chann[idx] = idx; 8907 8908 saved_chann = 0; 8909 used_chann = 0; 8910 8911 /* 8912 * First build a list of IO channels already assigned 8913 * to this phys_id before reassigning the same IO 8914 * channels to the remaining CPUs. 8915 */ 8916 cpup = phba->sli4_hba.cpu_map; 8917 cpu = first_cpu; 8918 cpup += cpu; 8919 for (idx = 0; idx < phba->sli4_hba.num_present_cpu; 8920 idx++) { 8921 if (cpup->phys_id == i) { 8922 /* 8923 * Save any IO channels that are 8924 * already mapped to this phys_id. 
8925 */ 8926 if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) { 8927 if (saved_chann <= 8928 LPFC_FCP_IO_CHAN_MAX) { 8929 chann[saved_chann] = 8930 cpup->channel_id; 8931 saved_chann++; 8932 } 8933 goto out; 8934 } 8935 8936 /* See if we are using round-robin */ 8937 if (saved_chann == 0) 8938 saved_chann = 8939 phba->cfg_fcp_io_channel; 8940 8941 /* Associate next IO channel with CPU */ 8942 cpup->channel_id = chann[used_chann]; 8943 num_io_channel++; 8944 used_chann++; 8945 if (used_chann == saved_chann) 8946 used_chann = 0; 8947 8948 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8949 "3331 Set IO_CHANN " 8950 "CPU %d channel %d\n", 8951 idx, cpup->channel_id); 8952 } 8953 out: 8954 cpu++; 8955 if (cpu >= phba->sli4_hba.num_present_cpu) { 8956 cpup = phba->sli4_hba.cpu_map; 8957 cpu = 0; 8958 } else { 8959 cpup++; 8960 } 8961 } 8962 } 8963 8964 if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) { 8965 cpup = phba->sli4_hba.cpu_map; 8966 for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) { 8967 if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) { 8968 cpup->channel_id = 0; 8969 num_io_channel++; 8970 8971 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 8972 "3332 Assign IO_CHANN " 8973 "CPU %d channel %d\n", 8974 idx, cpup->channel_id); 8975 } 8976 cpup++; 8977 } 8978 } 8979 8980 /* Sanity check */ 8981 if (num_io_channel != phba->sli4_hba.num_present_cpu) 8982 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8983 "3333 Set affinity mismatch:" 8984 "%d chann != %d cpus: %d vectors\n", 8985 num_io_channel, phba->sli4_hba.num_present_cpu, 8986 vectors); 8987 8988 /* Enable using cpu affinity for scheduling */ 8989 phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU; 8990 return 1; 8991 } 8992 8993 8994 /** 8995 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device 8996 * @phba: pointer to lpfc hba data structure. 8997 * 8998 * This routine is invoked to enable the MSI-X interrupt vectors to device 8999 * with SLI-4 interface spec. The kernel function pci_enable_msix_range() 9000 * is called to enable the MSI-X vectors. The device driver is responsible 9001 * for calling the individual request_irq() to register each MSI-X vector 9002 * with a interrupt handler, which is done in this function. Note that 9003 * later when device is unloading, the driver should always call free_irq() 9004 * on all MSI-X vectors it has done request_irq() on before calling 9005 * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device 9006 * will be left with MSI-X enabled and leaks its vectors. 
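 *
 * As a hedged illustration (not driver code; the array size, handler,
 * name and context below are placeholders), the allocate-then-request
 * pattern this routine follows looks like:
 *
 *	struct msix_entry ent[4];
 *	int i, got;
 *
 *	for (i = 0; i < 4; i++)
 *		ent[i].entry = i;
 *	got = pci_enable_msix_range(pdev, ent, 2, 4);
 *	if (got < 0)
 *		return got;
 *	for (i = 0; i < got; i++)
 *		if (request_irq(ent[i].vector, my_handler, 0, "my-hba", ctx))
 *			goto undo_irqs;
 *
 * and any vector already requested must see free_irq() before
 * pci_disable_msix() on the unwind path, as the cfg_fail_out label below
 * does.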
9007 * 9008 * Return codes 9009 * 0 - successful 9010 * other values - error 9011 **/ 9012 static int 9013 lpfc_sli4_enable_msix(struct lpfc_hba *phba) 9014 { 9015 int vectors, rc, index; 9016 9017 /* Set up MSI-X multi-message vectors */ 9018 for (index = 0; index < phba->cfg_fcp_io_channel; index++) 9019 phba->sli4_hba.msix_entries[index].entry = index; 9020 9021 /* Configure MSI-X capability structure */ 9022 vectors = phba->cfg_fcp_io_channel; 9023 if (phba->cfg_fof) { 9024 phba->sli4_hba.msix_entries[index].entry = index; 9025 vectors++; 9026 } 9027 rc = pci_enable_msix_range(phba->pcidev, phba->sli4_hba.msix_entries, 9028 2, vectors); 9029 if (rc < 0) { 9030 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9031 "0484 PCI enable MSI-X failed (%d)\n", rc); 9032 goto vec_fail_out; 9033 } 9034 vectors = rc; 9035 9036 /* Log MSI-X vector assignment */ 9037 for (index = 0; index < vectors; index++) 9038 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9039 "0489 MSI-X entry[%d]: vector=x%x " 9040 "message=%d\n", index, 9041 phba->sli4_hba.msix_entries[index].vector, 9042 phba->sli4_hba.msix_entries[index].entry); 9043 9044 /* Assign MSI-X vectors to interrupt handlers */ 9045 for (index = 0; index < vectors; index++) { 9046 memset(&phba->sli4_hba.handler_name[index], 0, 16); 9047 snprintf((char *)&phba->sli4_hba.handler_name[index], 9048 LPFC_SLI4_HANDLER_NAME_SZ, 9049 LPFC_DRIVER_HANDLER_NAME"%d", index); 9050 9051 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 9052 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 9053 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1); 9054 if (phba->cfg_fof && (index == (vectors - 1))) 9055 rc = request_irq( 9056 phba->sli4_hba.msix_entries[index].vector, 9057 &lpfc_sli4_fof_intr_handler, 0, 9058 (char *)&phba->sli4_hba.handler_name[index], 9059 &phba->sli4_hba.fcp_eq_hdl[index]); 9060 else 9061 rc = request_irq( 9062 phba->sli4_hba.msix_entries[index].vector, 9063 &lpfc_sli4_hba_intr_handler, 0, 9064 (char *)&phba->sli4_hba.handler_name[index], 9065 &phba->sli4_hba.fcp_eq_hdl[index]); 9066 if (rc) { 9067 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 9068 "0486 MSI-X fast-path (%d) " 9069 "request_irq failed (%d)\n", index, rc); 9070 goto cfg_fail_out; 9071 } 9072 } 9073 9074 if (phba->cfg_fof) 9075 vectors--; 9076 9077 if (vectors != phba->cfg_fcp_io_channel) { 9078 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9079 "3238 Reducing IO channels to match number of " 9080 "MSI-X vectors, requested %d got %d\n", 9081 phba->cfg_fcp_io_channel, vectors); 9082 phba->cfg_fcp_io_channel = vectors; 9083 } 9084 9085 if (!shost_use_blk_mq(lpfc_shost_from_vport(phba->pport))) 9086 lpfc_sli4_set_affinity(phba, vectors); 9087 return rc; 9088 9089 cfg_fail_out: 9090 /* free the irq already requested */ 9091 for (--index; index >= 0; index--) { 9092 irq_set_affinity_hint(phba->sli4_hba.msix_entries[index]. 9093 vector, NULL); 9094 free_irq(phba->sli4_hba.msix_entries[index].vector, 9095 &phba->sli4_hba.fcp_eq_hdl[index]); 9096 } 9097 9098 /* Unconfigure MSI-X capability structure */ 9099 pci_disable_msix(phba->pcidev); 9100 9101 vec_fail_out: 9102 return rc; 9103 } 9104 9105 /** 9106 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device 9107 * @phba: pointer to lpfc hba data structure. 9108 * 9109 * This routine is invoked to release the MSI-X vectors and then disable the 9110 * MSI-X interrupt mode to device with SLI-4 interface spec. 
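 *
 * The release order mirrors lpfc_sli4_enable_msix(): the affinity hint is
 * cleared and each vector is freed before MSI-X is disabled. A minimal
 * sketch of that order (illustrative only; names are placeholders):
 *
 *	for (i = 0; i < nvec; i++) {
 *		irq_set_affinity_hint(ent[i].vector, NULL);
 *		free_irq(ent[i].vector, ctx);
 *	}
 *	pci_disable_msix(pdev);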
9111 **/ 9112 static void 9113 lpfc_sli4_disable_msix(struct lpfc_hba *phba) 9114 { 9115 int index; 9116 9117 /* Free up MSI-X multi-message vectors */ 9118 for (index = 0; index < phba->cfg_fcp_io_channel; index++) { 9119 irq_set_affinity_hint(phba->sli4_hba.msix_entries[index]. 9120 vector, NULL); 9121 free_irq(phba->sli4_hba.msix_entries[index].vector, 9122 &phba->sli4_hba.fcp_eq_hdl[index]); 9123 } 9124 if (phba->cfg_fof) { 9125 free_irq(phba->sli4_hba.msix_entries[index].vector, 9126 &phba->sli4_hba.fcp_eq_hdl[index]); 9127 } 9128 /* Disable MSI-X */ 9129 pci_disable_msix(phba->pcidev); 9130 9131 return; 9132 } 9133 9134 /** 9135 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device 9136 * @phba: pointer to lpfc hba data structure. 9137 * 9138 * This routine is invoked to enable the MSI interrupt mode to device with 9139 * SLI-4 interface spec. The kernel function pci_enable_msi() is called 9140 * to enable the MSI vector. The device driver is responsible for calling 9141 * the request_irq() to register the MSI vector with an interrupt handler, 9142 * which is done in this function. 9143 * 9144 * Return codes 9145 * 0 - successful 9146 * other values - error 9147 **/ 9148 static int 9149 lpfc_sli4_enable_msi(struct lpfc_hba *phba) 9150 { 9151 int rc, index; 9152 9153 rc = pci_enable_msi(phba->pcidev); 9154 if (!rc) 9155 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9156 "0487 PCI enable MSI mode success.\n"); 9157 else { 9158 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9159 "0488 PCI enable MSI mode failed (%d)\n", rc); 9160 return rc; 9161 } 9162 9163 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 9164 0, LPFC_DRIVER_NAME, phba); 9165 if (rc) { 9166 pci_disable_msi(phba->pcidev); 9167 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 9168 "0490 MSI request_irq failed (%d)\n", rc); 9169 return rc; 9170 } 9171 9172 for (index = 0; index < phba->cfg_fcp_io_channel; index++) { 9173 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 9174 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 9175 } 9176 9177 if (phba->cfg_fof) { 9178 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 9179 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 9180 } 9181 return 0; 9182 } 9183 9184 /** 9185 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device 9186 * @phba: pointer to lpfc hba data structure. 9187 * 9188 * This routine is invoked to disable the MSI interrupt mode to device with 9189 * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it has 9190 * done request_irq() on before calling pci_disable_msi(). Failure to do so 9191 * results in a BUG_ON() and the device will be left with MSI enabled, leaking 9192 * its vector. 9193 **/ 9194 static void 9195 lpfc_sli4_disable_msi(struct lpfc_hba *phba) 9196 { 9197 free_irq(phba->pcidev->irq, phba); 9198 pci_disable_msi(phba->pcidev); 9199 return; 9200 } 9201 9202 /** 9203 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device 9204 * @phba: pointer to lpfc hba data structure. 9205 * 9206 * This routine is invoked to enable device interrupt and associate driver's 9207 * interrupt handler(s) to interrupt vector(s) to device with SLI-4 9208 * interface spec. Depending on the interrupt mode configured for the driver, 9209 * the driver will try to fall back from the configured interrupt mode to an 9210 * interrupt mode which is supported by the platform, kernel, and device in 9211 * the order of: 9212 * MSI-X -> MSI -> IRQ.
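 *
 * Simplified, the ladder below behaves roughly like the following sketch
 * (the real code tracks phba->intr_type rather than intr_mode; this is a
 * restatement for illustration, not driver code):
 *
 *	intr_mode = LPFC_INTR_ERROR;
 *	if (cfg_mode == 2 && lpfc_sli4_enable_msix(phba) == 0)
 *		intr_mode = 2;
 *	if (intr_mode == LPFC_INTR_ERROR && cfg_mode >= 1 &&
 *	    lpfc_sli4_enable_msi(phba) == 0)
 *		intr_mode = 1;
 *	if (intr_mode == LPFC_INTR_ERROR &&
 *	    request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
 *			IRQF_SHARED, LPFC_DRIVER_NAME, phba) == 0)
 *		intr_mode = 0;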
9213 * 9214 * Return codes 9215 * 0 - successful 9216 * other values - error 9217 **/ 9218 static uint32_t 9219 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) 9220 { 9221 uint32_t intr_mode = LPFC_INTR_ERROR; 9222 int retval, index; 9223 9224 if (cfg_mode == 2) { 9225 /* Preparation before conf_msi mbox cmd */ 9226 retval = 0; 9227 if (!retval) { 9228 /* Now, try to enable MSI-X interrupt mode */ 9229 retval = lpfc_sli4_enable_msix(phba); 9230 if (!retval) { 9231 /* Indicate initialization to MSI-X mode */ 9232 phba->intr_type = MSIX; 9233 intr_mode = 2; 9234 } 9235 } 9236 } 9237 9238 /* Fallback to MSI if MSI-X initialization failed */ 9239 if (cfg_mode >= 1 && phba->intr_type == NONE) { 9240 retval = lpfc_sli4_enable_msi(phba); 9241 if (!retval) { 9242 /* Indicate initialization to MSI mode */ 9243 phba->intr_type = MSI; 9244 intr_mode = 1; 9245 } 9246 } 9247 9248 /* Fallback to INTx if both MSI-X/MSI initalization failed */ 9249 if (phba->intr_type == NONE) { 9250 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 9251 IRQF_SHARED, LPFC_DRIVER_NAME, phba); 9252 if (!retval) { 9253 /* Indicate initialization to INTx mode */ 9254 phba->intr_type = INTx; 9255 intr_mode = 0; 9256 for (index = 0; index < phba->cfg_fcp_io_channel; 9257 index++) { 9258 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 9259 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 9260 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index]. 9261 fcp_eq_in_use, 1); 9262 } 9263 if (phba->cfg_fof) { 9264 phba->sli4_hba.fcp_eq_hdl[index].idx = index; 9265 phba->sli4_hba.fcp_eq_hdl[index].phba = phba; 9266 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index]. 9267 fcp_eq_in_use, 1); 9268 } 9269 } 9270 } 9271 return intr_mode; 9272 } 9273 9274 /** 9275 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device 9276 * @phba: pointer to lpfc hba data structure. 9277 * 9278 * This routine is invoked to disable device interrupt and disassociate 9279 * the driver's interrupt handler(s) from interrupt vector(s) to device 9280 * with SLI-4 interface spec. Depending on the interrupt mode, the driver 9281 * will release the interrupt vector(s) for the message signaled interrupt. 9282 **/ 9283 static void 9284 lpfc_sli4_disable_intr(struct lpfc_hba *phba) 9285 { 9286 /* Disable the currently initialized interrupt mode */ 9287 if (phba->intr_type == MSIX) 9288 lpfc_sli4_disable_msix(phba); 9289 else if (phba->intr_type == MSI) 9290 lpfc_sli4_disable_msi(phba); 9291 else if (phba->intr_type == INTx) 9292 free_irq(phba->pcidev->irq, phba); 9293 9294 /* Reset interrupt management states */ 9295 phba->intr_type = NONE; 9296 phba->sli.slistat.sli_intr = 0; 9297 9298 return; 9299 } 9300 9301 /** 9302 * lpfc_unset_hba - Unset SLI3 hba device initialization 9303 * @phba: pointer to lpfc hba data structure. 9304 * 9305 * This routine is invoked to unset the HBA device initialization steps to 9306 * a device with SLI-3 interface spec. 
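 *
 * It is the error-path counterpart of SLI-3 probe: lpfc_pci_probe_one_s3()
 * jumps to its out_remove_device label and calls this routine when HBA
 * setup fails after interrupts have been enabled.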
9307 **/ 9308 static void 9309 lpfc_unset_hba(struct lpfc_hba *phba) 9310 { 9311 struct lpfc_vport *vport = phba->pport; 9312 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9313 9314 spin_lock_irq(shost->host_lock); 9315 vport->load_flag |= FC_UNLOADING; 9316 spin_unlock_irq(shost->host_lock); 9317 9318 kfree(phba->vpi_bmask); 9319 kfree(phba->vpi_ids); 9320 9321 lpfc_stop_hba_timers(phba); 9322 9323 phba->pport->work_port_events = 0; 9324 9325 lpfc_sli_hba_down(phba); 9326 9327 lpfc_sli_brdrestart(phba); 9328 9329 lpfc_sli_disable_intr(phba); 9330 9331 return; 9332 } 9333 9334 /** 9335 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy 9336 * @phba: Pointer to HBA context object. 9337 * 9338 * This function is called in the SLI4 code path to wait for completion 9339 * of device's XRIs exchange busy. It will check the XRI exchange busy 9340 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after 9341 * that, it will check the XRI exchange busy on outstanding FCP and ELS 9342 * I/Os every 30 seconds, log error message, and wait forever. Only when 9343 * all XRI exchange busy complete, the driver unload shall proceed with 9344 * invoking the function reset ioctl mailbox command to the CNA and the 9345 * the rest of the driver unload resource release. 9346 **/ 9347 static void 9348 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) 9349 { 9350 int wait_time = 0; 9351 int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); 9352 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 9353 9354 while (!fcp_xri_cmpl || !els_xri_cmpl) { 9355 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { 9356 if (!fcp_xri_cmpl) 9357 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9358 "2877 FCP XRI exchange busy " 9359 "wait time: %d seconds.\n", 9360 wait_time/1000); 9361 if (!els_xri_cmpl) 9362 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9363 "2878 ELS XRI exchange busy " 9364 "wait time: %d seconds.\n", 9365 wait_time/1000); 9366 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2); 9367 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2; 9368 } else { 9369 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); 9370 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; 9371 } 9372 fcp_xri_cmpl = 9373 list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); 9374 els_xri_cmpl = 9375 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); 9376 } 9377 } 9378 9379 /** 9380 * lpfc_sli4_hba_unset - Unset the fcoe hba 9381 * @phba: Pointer to HBA context object. 9382 * 9383 * This function is called in the SLI4 code path to reset the HBA's FCoE 9384 * function. The caller is not required to hold any lock. This routine 9385 * issues PCI function reset mailbox command to reset the FCoE function. 9386 * At the end of the function, it calls lpfc_hba_down_post function to 9387 * free any pending commands. 9388 **/ 9389 static void 9390 lpfc_sli4_hba_unset(struct lpfc_hba *phba) 9391 { 9392 int wait_cnt = 0; 9393 LPFC_MBOXQ_t *mboxq; 9394 struct pci_dev *pdev = phba->pcidev; 9395 9396 lpfc_stop_hba_timers(phba); 9397 phba->sli4_hba.intr_enable = 0; 9398 9399 /* 9400 * Gracefully wait out the potential current outstanding asynchronous 9401 * mailbox command. 
9402 */ 9403 9404 /* First, block any pending async mailbox command from being posted */ 9405 spin_lock_irq(&phba->hbalock); 9406 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 9407 spin_unlock_irq(&phba->hbalock); 9408 /* Now, try to wait it out if we can */ 9409 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 9410 msleep(10); 9411 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) 9412 break; 9413 } 9414 /* Forcefully release the outstanding mailbox command if timed out */ 9415 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { 9416 spin_lock_irq(&phba->hbalock); 9417 mboxq = phba->sli.mbox_active; 9418 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 9419 __lpfc_mbox_cmpl_put(phba, mboxq); 9420 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 9421 phba->sli.mbox_active = NULL; 9422 spin_unlock_irq(&phba->hbalock); 9423 } 9424 9425 /* Abort all iocbs associated with the hba */ 9426 lpfc_sli_hba_iocb_abort(phba); 9427 9428 /* Wait for completion of device XRI exchange busy */ 9429 lpfc_sli4_xri_exchange_busy_wait(phba); 9430 9431 /* Disable PCI subsystem interrupt */ 9432 lpfc_sli4_disable_intr(phba); 9433 9434 /* Disable SR-IOV if enabled */ 9435 if (phba->cfg_sriov_nr_virtfn) 9436 pci_disable_sriov(pdev); 9437 9438 /* Stopping the kthread will trigger work_done one more time */ 9439 kthread_stop(phba->worker_thread); 9440 9441 /* Reset SLI4 HBA FCoE function */ 9442 lpfc_pci_function_reset(phba); 9443 lpfc_sli4_queue_destroy(phba); 9444 9445 /* Stop the SLI4 device port */ 9446 phba->pport->work_port_events = 0; 9447 } 9448 9449 /** 9450 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities. 9451 * @phba: Pointer to HBA context object. 9452 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 9453 * 9454 * This function is called in the SLI4 code path to read the port's 9455 * sli4 capabilities. 9456 * 9457 * This function may be called from any context that can block-wait 9458 * for the completion. The expectation is that this routine is called 9459 * typically from probe_one or from the online routine.
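 *
 * The mailbox is issued with the usual poll-or-wait idiom used throughout
 * this file (shown here only as a pointer to the pattern, not new code):
 *
 *	if (!phba->sli4_hba.intr_enable)
 *		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 *	else
 *		rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
 *					      lpfc_mbox_tmo_val(phba, mboxq));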
9460 **/ 9461 int 9462 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 9463 { 9464 int rc; 9465 struct lpfc_mqe *mqe; 9466 struct lpfc_pc_sli4_params *sli4_params; 9467 uint32_t mbox_tmo; 9468 9469 rc = 0; 9470 mqe = &mboxq->u.mqe; 9471 9472 /* Read the port's SLI4 Parameters port capabilities */ 9473 lpfc_pc_sli4_params(mboxq); 9474 if (!phba->sli4_hba.intr_enable) 9475 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 9476 else { 9477 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 9478 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 9479 } 9480 9481 if (unlikely(rc)) 9482 return 1; 9483 9484 sli4_params = &phba->sli4_hba.pc_sli4_params; 9485 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params); 9486 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params); 9487 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params); 9488 sli4_params->featurelevel_1 = bf_get(featurelevel_1, 9489 &mqe->un.sli4_params); 9490 sli4_params->featurelevel_2 = bf_get(featurelevel_2, 9491 &mqe->un.sli4_params); 9492 sli4_params->proto_types = mqe->un.sli4_params.word3; 9493 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len; 9494 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params); 9495 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params); 9496 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params); 9497 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params); 9498 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params); 9499 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params); 9500 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params); 9501 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params); 9502 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params); 9503 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params); 9504 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params); 9505 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params); 9506 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params); 9507 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params); 9508 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params); 9509 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params); 9510 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params); 9511 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params); 9512 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params); 9513 9514 /* Make sure that sge_supp_len can be handled by the driver */ 9515 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 9516 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 9517 9518 return rc; 9519 } 9520 9521 /** 9522 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS. 9523 * @phba: Pointer to HBA context object. 9524 * @mboxq: Pointer to the mailboxq memory for the mailbox command response. 9525 * 9526 * This function is called in the SLI4 code path to read the port's 9527 * sli4 capabilities. 9528 * 9529 * This function may be be called from any context that can block-wait 9530 * for the completion. The expectation is that this routine is called 9531 * typically from probe_one or from the online routine. 
9532 **/ 9533 int 9534 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 9535 { 9536 int rc; 9537 struct lpfc_mqe *mqe = &mboxq->u.mqe; 9538 struct lpfc_pc_sli4_params *sli4_params; 9539 uint32_t mbox_tmo; 9540 int length; 9541 struct lpfc_sli4_parameters *mbx_sli4_parameters; 9542 9543 /* 9544 * By default, the driver assumes the SLI4 port requires RPI 9545 * header postings. The SLI4_PARAM response will correct this 9546 * assumption. 9547 */ 9548 phba->sli4_hba.rpi_hdrs_in_use = 1; 9549 9550 /* Read the port's SLI4 Config Parameters */ 9551 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - 9552 sizeof(struct lpfc_sli4_cfg_mhdr)); 9553 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 9554 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, 9555 length, LPFC_SLI4_MBX_EMBED); 9556 if (!phba->sli4_hba.intr_enable) 9557 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 9558 else { 9559 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 9560 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 9561 } 9562 if (unlikely(rc)) 9563 return rc; 9564 sli4_params = &phba->sli4_hba.pc_sli4_params; 9565 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; 9566 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters); 9567 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters); 9568 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters); 9569 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1, 9570 mbx_sli4_parameters); 9571 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2, 9572 mbx_sli4_parameters); 9573 if (bf_get(cfg_phwq, mbx_sli4_parameters)) 9574 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED; 9575 else 9576 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; 9577 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; 9578 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters); 9579 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters); 9580 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); 9581 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); 9582 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); 9583 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); 9584 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); 9585 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, 9586 mbx_sli4_parameters); 9587 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, 9588 mbx_sli4_parameters); 9589 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); 9590 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters); 9591 9592 /* Make sure that sge_supp_len can be handled by the driver */ 9593 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE) 9594 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE; 9595 9596 /* 9597 * Issue IOs with CDB embedded in WQE to minimized the number 9598 * of DMAs the firmware has to do. Setting this to 1 also forces 9599 * the driver to use 128 bytes WQEs for FCP IOs. 9600 */ 9601 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters)) 9602 phba->fcp_embed_io = 1; 9603 else 9604 phba->fcp_embed_io = 0; 9605 9606 /* 9607 * Check if the SLI port supports MDS Diagnostics 9608 */ 9609 if (bf_get(cfg_mds_diags, mbx_sli4_parameters)) 9610 phba->mds_diags_support = 1; 9611 else 9612 phba->mds_diags_support = 0; 9613 return 0; 9614 } 9615 9616 /** 9617 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. 
9618 * @pdev: pointer to PCI device 9619 * @pid: pointer to PCI device identifier 9620 * 9621 * This routine is to be called to attach a device with SLI-3 interface spec 9622 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 9623 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 9624 * information of the device and driver to see if the driver state that it can 9625 * support this kind of device. If the match is successful, the driver core 9626 * invokes this routine. If this routine determines it can claim the HBA, it 9627 * does all the initialization that it needs to do to handle the HBA properly. 9628 * 9629 * Return code 9630 * 0 - driver can claim the device 9631 * negative value - driver can not claim the device 9632 **/ 9633 static int 9634 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) 9635 { 9636 struct lpfc_hba *phba; 9637 struct lpfc_vport *vport = NULL; 9638 struct Scsi_Host *shost = NULL; 9639 int error; 9640 uint32_t cfg_mode, intr_mode; 9641 9642 /* Allocate memory for HBA structure */ 9643 phba = lpfc_hba_alloc(pdev); 9644 if (!phba) 9645 return -ENOMEM; 9646 9647 /* Perform generic PCI device enabling operation */ 9648 error = lpfc_enable_pci_dev(phba); 9649 if (error) 9650 goto out_free_phba; 9651 9652 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 9653 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 9654 if (error) 9655 goto out_disable_pci_dev; 9656 9657 /* Set up SLI-3 specific device PCI memory space */ 9658 error = lpfc_sli_pci_mem_setup(phba); 9659 if (error) { 9660 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9661 "1402 Failed to set up pci memory space.\n"); 9662 goto out_disable_pci_dev; 9663 } 9664 9665 /* Set up phase-1 common device driver resources */ 9666 error = lpfc_setup_driver_resource_phase1(phba); 9667 if (error) { 9668 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9669 "1403 Failed to set up driver resource.\n"); 9670 goto out_unset_pci_mem_s3; 9671 } 9672 9673 /* Set up SLI-3 specific device driver resources */ 9674 error = lpfc_sli_driver_resource_setup(phba); 9675 if (error) { 9676 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9677 "1404 Failed to set up driver resource.\n"); 9678 goto out_unset_pci_mem_s3; 9679 } 9680 9681 /* Initialize and populate the iocb list per host */ 9682 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); 9683 if (error) { 9684 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9685 "1405 Failed to initialize iocb list.\n"); 9686 goto out_unset_driver_resource_s3; 9687 } 9688 9689 /* Set up common device driver resources */ 9690 error = lpfc_setup_driver_resource_phase2(phba); 9691 if (error) { 9692 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9693 "1406 Failed to set up driver resource.\n"); 9694 goto out_free_iocb_list; 9695 } 9696 9697 /* Get the default values for Model Name and Description */ 9698 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 9699 9700 /* Create SCSI host to the physical port */ 9701 error = lpfc_create_shost(phba); 9702 if (error) { 9703 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9704 "1407 Failed to create scsi host.\n"); 9705 goto out_unset_driver_resource; 9706 } 9707 9708 /* Configure sysfs attributes */ 9709 vport = phba->pport; 9710 error = lpfc_alloc_sysfs_attr(vport); 9711 if (error) { 9712 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9713 "1476 Failed to allocate sysfs attr\n"); 9714 goto out_destroy_shost; 9715 } 9716 9717 shost = lpfc_shost_from_vport(vport); /* save shost for error 
cleanup */ 9718 /* Now, trying to enable interrupt and bring up the device */ 9719 cfg_mode = phba->cfg_use_msi; 9720 while (true) { 9721 /* Put device to a known state before enabling interrupt */ 9722 lpfc_stop_port(phba); 9723 /* Configure and enable interrupt */ 9724 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); 9725 if (intr_mode == LPFC_INTR_ERROR) { 9726 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9727 "0431 Failed to enable interrupt.\n"); 9728 error = -ENODEV; 9729 goto out_free_sysfs_attr; 9730 } 9731 /* SLI-3 HBA setup */ 9732 if (lpfc_sli_hba_setup(phba)) { 9733 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9734 "1477 Failed to set up hba\n"); 9735 error = -ENODEV; 9736 goto out_remove_device; 9737 } 9738 9739 /* Wait 50ms for the interrupts of previous mailbox commands */ 9740 msleep(50); 9741 /* Check active interrupts on message signaled interrupts */ 9742 if (intr_mode == 0 || 9743 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { 9744 /* Log the current active interrupt mode */ 9745 phba->intr_mode = intr_mode; 9746 lpfc_log_intr_mode(phba, intr_mode); 9747 break; 9748 } else { 9749 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9750 "0447 Configure interrupt mode (%d) " 9751 "failed active interrupt test.\n", 9752 intr_mode); 9753 /* Disable the current interrupt mode */ 9754 lpfc_sli_disable_intr(phba); 9755 /* Try next level of interrupt mode */ 9756 cfg_mode = --intr_mode; 9757 } 9758 } 9759 9760 /* Perform post initialization setup */ 9761 lpfc_post_init_setup(phba); 9762 9763 /* Check if there are static vports to be created. */ 9764 lpfc_create_static_vport(phba); 9765 9766 return 0; 9767 9768 out_remove_device: 9769 lpfc_unset_hba(phba); 9770 out_free_sysfs_attr: 9771 lpfc_free_sysfs_attr(vport); 9772 out_destroy_shost: 9773 lpfc_destroy_shost(phba); 9774 out_unset_driver_resource: 9775 lpfc_unset_driver_resource_phase2(phba); 9776 out_free_iocb_list: 9777 lpfc_free_iocb_list(phba); 9778 out_unset_driver_resource_s3: 9779 lpfc_sli_driver_resource_unset(phba); 9780 out_unset_pci_mem_s3: 9781 lpfc_sli_pci_mem_unset(phba); 9782 out_disable_pci_dev: 9783 lpfc_disable_pci_dev(phba); 9784 if (shost) 9785 scsi_host_put(shost); 9786 out_free_phba: 9787 lpfc_hba_free(phba); 9788 return error; 9789 } 9790 9791 /** 9792 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. 9793 * @pdev: pointer to PCI device 9794 * 9795 * This routine is to be called to disattach a device with SLI-3 interface 9796 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is 9797 * removed from PCI bus, it performs all the necessary cleanup for the HBA 9798 * device to be removed from the PCI subsystem properly. 
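 *
 * As with the other PCI callbacks in this file, the driver context is
 * recovered from the Scsi_Host stored as PCI driver data (sketch of the
 * lookup the function body performs):
 *
 *	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 *	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
 *	struct lpfc_hba *phba = vport->phba;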
9799 **/ 9800 static void 9801 lpfc_pci_remove_one_s3(struct pci_dev *pdev) 9802 { 9803 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9804 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 9805 struct lpfc_vport **vports; 9806 struct lpfc_hba *phba = vport->phba; 9807 int i; 9808 9809 spin_lock_irq(&phba->hbalock); 9810 vport->load_flag |= FC_UNLOADING; 9811 spin_unlock_irq(&phba->hbalock); 9812 9813 lpfc_free_sysfs_attr(vport); 9814 9815 /* Release all the vports against this physical port */ 9816 vports = lpfc_create_vport_work_array(phba); 9817 if (vports != NULL) 9818 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 9819 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 9820 continue; 9821 fc_vport_terminate(vports[i]->fc_vport); 9822 } 9823 lpfc_destroy_vport_work_array(phba, vports); 9824 9825 /* Remove FC host and then SCSI host with the physical port */ 9826 fc_remove_host(shost); 9827 scsi_remove_host(shost); 9828 lpfc_cleanup(vport); 9829 9830 /* 9831 * Bring down the SLI Layer. This step disable all interrupts, 9832 * clears the rings, discards all mailbox commands, and resets 9833 * the HBA. 9834 */ 9835 9836 /* HBA interrupt will be disabled after this call */ 9837 lpfc_sli_hba_down(phba); 9838 /* Stop kthread signal shall trigger work_done one more time */ 9839 kthread_stop(phba->worker_thread); 9840 /* Final cleanup of txcmplq and reset the HBA */ 9841 lpfc_sli_brdrestart(phba); 9842 9843 kfree(phba->vpi_bmask); 9844 kfree(phba->vpi_ids); 9845 9846 lpfc_stop_hba_timers(phba); 9847 spin_lock_irq(&phba->hbalock); 9848 list_del_init(&vport->listentry); 9849 spin_unlock_irq(&phba->hbalock); 9850 9851 lpfc_debugfs_terminate(vport); 9852 9853 /* Disable SR-IOV if enabled */ 9854 if (phba->cfg_sriov_nr_virtfn) 9855 pci_disable_sriov(pdev); 9856 9857 /* Disable interrupt */ 9858 lpfc_sli_disable_intr(phba); 9859 9860 scsi_host_put(shost); 9861 9862 /* 9863 * Call scsi_free before mem_free since scsi bufs are released to their 9864 * corresponding pools here. 9865 */ 9866 lpfc_scsi_free(phba); 9867 lpfc_mem_free_all(phba); 9868 9869 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), 9870 phba->hbqslimp.virt, phba->hbqslimp.phys); 9871 9872 /* Free resources associated with SLI2 interface */ 9873 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, 9874 phba->slim2p.virt, phba->slim2p.phys); 9875 9876 /* unmap adapter SLIM and Control Registers */ 9877 iounmap(phba->ctrl_regs_memmap_p); 9878 iounmap(phba->slim_memmap_p); 9879 9880 lpfc_hba_free(phba); 9881 9882 pci_release_mem_regions(pdev); 9883 pci_disable_device(pdev); 9884 } 9885 9886 /** 9887 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt 9888 * @pdev: pointer to PCI device 9889 * @msg: power management message 9890 * 9891 * This routine is to be called from the kernel's PCI subsystem to support 9892 * system Power Management (PM) to device with SLI-3 interface spec. When 9893 * PM invokes this method, it quiesces the device by stopping the driver's 9894 * worker thread for the device, turning off device's interrupt and DMA, 9895 * and bring the device offline. 
Note that as the driver implements the 9896 * minimum PM requirements to a power-aware driver's PM support for the 9897 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 9898 * to the suspend() method call will be treated as SUSPEND and the driver will 9899 * fully reinitialize its device during resume() method call, the driver will 9900 * set device to PCI_D3hot state in PCI config space instead of setting it 9901 * according to the @msg provided by the PM. 9902 * 9903 * Return code 9904 * 0 - driver suspended the device 9905 * Error otherwise 9906 **/ 9907 static int 9908 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg) 9909 { 9910 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9911 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9912 9913 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9914 "0473 PCI device Power Management suspend.\n"); 9915 9916 /* Bring down the device */ 9917 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 9918 lpfc_offline(phba); 9919 kthread_stop(phba->worker_thread); 9920 9921 /* Disable interrupt from device */ 9922 lpfc_sli_disable_intr(phba); 9923 9924 /* Save device state to PCI config space */ 9925 pci_save_state(pdev); 9926 pci_set_power_state(pdev, PCI_D3hot); 9927 9928 return 0; 9929 } 9930 9931 /** 9932 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt 9933 * @pdev: pointer to PCI device 9934 * 9935 * This routine is to be called from the kernel's PCI subsystem to support 9936 * system Power Management (PM) to device with SLI-3 interface spec. When PM 9937 * invokes this method, it restores the device's PCI config space state and 9938 * fully reinitializes the device and brings it online. Note that as the 9939 * driver implements the minimum PM requirements to a power-aware driver's 9940 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, 9941 * FREEZE) to the suspend() method call will be treated as SUSPEND and the 9942 * driver will fully reinitialize its device during resume() method call, 9943 * the device will be set to PCI_D0 directly in PCI config space before 9944 * restoring the state. 9945 * 9946 * Return code 9947 * 0 - driver suspended the device 9948 * Error otherwise 9949 **/ 9950 static int 9951 lpfc_pci_resume_one_s3(struct pci_dev *pdev) 9952 { 9953 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9954 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 9955 uint32_t intr_mode; 9956 int error; 9957 9958 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 9959 "0452 PCI device Power Management resume.\n"); 9960 9961 /* Restore device state from PCI config space */ 9962 pci_set_power_state(pdev, PCI_D0); 9963 pci_restore_state(pdev); 9964 9965 /* 9966 * As the new kernel behavior of pci_restore_state() API call clears 9967 * device saved_state flag, need to save the restored state again. 9968 */ 9969 pci_save_state(pdev); 9970 9971 if (pdev->is_busmaster) 9972 pci_set_master(pdev); 9973 9974 /* Startup the kernel thread for this host adapter. 
*/ 9975 phba->worker_thread = kthread_run(lpfc_do_work, phba, 9976 "lpfc_worker_%d", phba->brd_no); 9977 if (IS_ERR(phba->worker_thread)) { 9978 error = PTR_ERR(phba->worker_thread); 9979 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9980 "0434 PM resume failed to start worker " 9981 "thread: error=x%x.\n", error); 9982 return error; 9983 } 9984 9985 /* Configure and enable interrupt */ 9986 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 9987 if (intr_mode == LPFC_INTR_ERROR) { 9988 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9989 "0430 PM resume Failed to enable interrupt\n"); 9990 return -EIO; 9991 } else 9992 phba->intr_mode = intr_mode; 9993 9994 /* Restart HBA and bring it online */ 9995 lpfc_sli_brdrestart(phba); 9996 lpfc_online(phba); 9997 9998 /* Log the current active interrupt mode */ 9999 lpfc_log_intr_mode(phba, phba->intr_mode); 10000 10001 return 0; 10002 } 10003 10004 /** 10005 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover 10006 * @phba: pointer to lpfc hba data structure. 10007 * 10008 * This routine is called to prepare the SLI3 device for PCI slot recover. It 10009 * aborts all the outstanding SCSI I/Os to the pci device. 10010 **/ 10011 static void 10012 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) 10013 { 10014 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10015 "2723 PCI channel I/O abort preparing for recovery\n"); 10016 10017 /* 10018 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 10019 * and let the SCSI mid-layer to retry them to recover. 10020 */ 10021 lpfc_sli_abort_fcp_rings(phba); 10022 } 10023 10024 /** 10025 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset 10026 * @phba: pointer to lpfc hba data structure. 10027 * 10028 * This routine is called to prepare the SLI3 device for PCI slot reset. It 10029 * disables the device interrupt and pci device, and aborts the internal FCP 10030 * pending I/Os. 10031 **/ 10032 static void 10033 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) 10034 { 10035 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10036 "2710 PCI channel disable preparing for reset\n"); 10037 10038 /* Block any management I/Os to the device */ 10039 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT); 10040 10041 /* Block all SCSI devices' I/Os on the host */ 10042 lpfc_scsi_dev_block(phba); 10043 10044 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 10045 lpfc_sli_flush_fcp_rings(phba); 10046 10047 /* stop all timers */ 10048 lpfc_stop_hba_timers(phba); 10049 10050 /* Disable interrupt and pci device */ 10051 lpfc_sli_disable_intr(phba); 10052 pci_disable_device(phba->pcidev); 10053 } 10054 10055 /** 10056 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable 10057 * @phba: pointer to lpfc hba data structure. 10058 * 10059 * This routine is called to prepare the SLI3 device for PCI slot permanently 10060 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 10061 * pending I/Os. 
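 *
 * Together with lpfc_sli_prep_dev_for_recover() and
 * lpfc_sli_prep_dev_for_reset(), it backs the pci_channel_io_normal,
 * pci_channel_io_frozen and pci_channel_io_perm_failure cases dispatched
 * by lpfc_io_error_detected_s3() below.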
10062 **/ 10063 static void 10064 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) 10065 { 10066 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10067 "2711 PCI channel permanent disable for failure\n"); 10068 /* Block all SCSI devices' I/Os on the host */ 10069 lpfc_scsi_dev_block(phba); 10070 10071 /* stop all timers */ 10072 lpfc_stop_hba_timers(phba); 10073 10074 /* Clean up all driver's outstanding SCSI I/Os */ 10075 lpfc_sli_flush_fcp_rings(phba); 10076 } 10077 10078 /** 10079 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error 10080 * @pdev: pointer to PCI device. 10081 * @state: the current PCI connection state. 10082 * 10083 * This routine is called from the PCI subsystem for I/O error handling to 10084 * device with SLI-3 interface spec. This function is called by the PCI 10085 * subsystem after a PCI bus error affecting this device has been detected. 10086 * When this function is invoked, it will need to stop all the I/Os and 10087 * interrupt(s) to the device. Once that is done, it will return 10088 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery 10089 * as desired. 10090 * 10091 * Return codes 10092 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link 10093 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 10094 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 10095 **/ 10096 static pci_ers_result_t 10097 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) 10098 { 10099 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10100 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10101 10102 switch (state) { 10103 case pci_channel_io_normal: 10104 /* Non-fatal error, prepare for recovery */ 10105 lpfc_sli_prep_dev_for_recover(phba); 10106 return PCI_ERS_RESULT_CAN_RECOVER; 10107 case pci_channel_io_frozen: 10108 /* Fatal error, prepare for slot reset */ 10109 lpfc_sli_prep_dev_for_reset(phba); 10110 return PCI_ERS_RESULT_NEED_RESET; 10111 case pci_channel_io_perm_failure: 10112 /* Permanent failure, prepare for device down */ 10113 lpfc_sli_prep_dev_for_perm_failure(phba); 10114 return PCI_ERS_RESULT_DISCONNECT; 10115 default: 10116 /* Unknown state, prepare and request slot reset */ 10117 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10118 "0472 Unknown PCI error state: x%x\n", state); 10119 lpfc_sli_prep_dev_for_reset(phba); 10120 return PCI_ERS_RESULT_NEED_RESET; 10121 } 10122 } 10123 10124 /** 10125 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. 10126 * @pdev: pointer to PCI device. 10127 * 10128 * This routine is called from the PCI subsystem for error handling to 10129 * device with SLI-3 interface spec. This is called after PCI bus has been 10130 * reset to restart the PCI card from scratch, as if from a cold-boot. 10131 * During the PCI subsystem error recovery, after driver returns 10132 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 10133 * recovery and then call this routine before calling the .resume method 10134 * to recover the device. This function will initialize the HBA device, 10135 * enable the interrupt, but it will just put the HBA to offline state 10136 * without passing any I/O traffic. 
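 *
 * error_detected, slot_reset and resume are the methods a driver exposes
 * through struct pci_error_handlers. A generic sketch of that wiring
 * (the my_* names are placeholders, not identifiers from this file):
 *
 *	static const struct pci_error_handlers my_err_handler = {
 *		.error_detected	= my_io_error_detected,
 *		.slot_reset	= my_io_slot_reset,
 *		.resume		= my_io_resume,
 *	};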
10137 * 10138 * Return codes 10139 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 10140 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 10141 */ 10142 static pci_ers_result_t 10143 lpfc_io_slot_reset_s3(struct pci_dev *pdev) 10144 { 10145 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10146 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10147 struct lpfc_sli *psli = &phba->sli; 10148 uint32_t intr_mode; 10149 10150 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 10151 if (pci_enable_device_mem(pdev)) { 10152 printk(KERN_ERR "lpfc: Cannot re-enable " 10153 "PCI device after reset.\n"); 10154 return PCI_ERS_RESULT_DISCONNECT; 10155 } 10156 10157 pci_restore_state(pdev); 10158 10159 /* 10160 * As the new kernel behavior of pci_restore_state() API call clears 10161 * device saved_state flag, need to save the restored state again. 10162 */ 10163 pci_save_state(pdev); 10164 10165 if (pdev->is_busmaster) 10166 pci_set_master(pdev); 10167 10168 spin_lock_irq(&phba->hbalock); 10169 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 10170 spin_unlock_irq(&phba->hbalock); 10171 10172 /* Configure and enable interrupt */ 10173 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); 10174 if (intr_mode == LPFC_INTR_ERROR) { 10175 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10176 "0427 Cannot re-enable interrupt after " 10177 "slot reset.\n"); 10178 return PCI_ERS_RESULT_DISCONNECT; 10179 } else 10180 phba->intr_mode = intr_mode; 10181 10182 /* Take device offline, it will perform cleanup */ 10183 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 10184 lpfc_offline(phba); 10185 lpfc_sli_brdrestart(phba); 10186 10187 /* Log the current active interrupt mode */ 10188 lpfc_log_intr_mode(phba, phba->intr_mode); 10189 10190 return PCI_ERS_RESULT_RECOVERED; 10191 } 10192 10193 /** 10194 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. 10195 * @pdev: pointer to PCI device 10196 * 10197 * This routine is called from the PCI subsystem for error handling to device 10198 * with SLI-3 interface spec. It is called when kernel error recovery tells 10199 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 10200 * error recovery. After this call, traffic can start to flow from this device 10201 * again. 10202 */ 10203 static void 10204 lpfc_io_resume_s3(struct pci_dev *pdev) 10205 { 10206 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10207 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10208 10209 /* Bring device online, it will be no-op for non-fatal error resume */ 10210 lpfc_online(phba); 10211 10212 /* Clean up Advanced Error Reporting (AER) if needed */ 10213 if (phba->hba_flag & HBA_AER_ENABLED) 10214 pci_cleanup_aer_uncorrect_error_status(pdev); 10215 } 10216 10217 /** 10218 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve 10219 * @phba: pointer to lpfc hba data structure. 
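 *
 * The reservation grows stepwise with the configured XRI count on SLI-4:
 * for example max_xri <= 100 reserves 10 IOCBs, <= 1024 reserves 100, and
 * anything above 2048 reserves 250, while SLI-3 reserves none.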
10220 * 10221 * returns the number of ELS/CT IOCBs to reserve 10222 **/ 10223 int 10224 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) 10225 { 10226 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 10227 10228 if (phba->sli_rev == LPFC_SLI_REV4) { 10229 if (max_xri <= 100) 10230 return 10; 10231 else if (max_xri <= 256) 10232 return 25; 10233 else if (max_xri <= 512) 10234 return 50; 10235 else if (max_xri <= 1024) 10236 return 100; 10237 else if (max_xri <= 1536) 10238 return 150; 10239 else if (max_xri <= 2048) 10240 return 200; 10241 else 10242 return 250; 10243 } else 10244 return 0; 10245 } 10246 10247 /** 10248 * lpfc_write_firmware - attempt to write a firmware image to the port 10249 * @fw: pointer to firmware image returned from request_firmware. 10250 * @phba: pointer to lpfc hba data structure. 10251 * 10252 **/ 10253 static void 10254 lpfc_write_firmware(const struct firmware *fw, void *context) 10255 { 10256 struct lpfc_hba *phba = (struct lpfc_hba *)context; 10257 char fwrev[FW_REV_STR_SIZE]; 10258 struct lpfc_grp_hdr *image; 10259 struct list_head dma_buffer_list; 10260 int i, rc = 0; 10261 struct lpfc_dmabuf *dmabuf, *next; 10262 uint32_t offset = 0, temp_offset = 0; 10263 10264 /* It can be null in no-wait mode, sanity check */ 10265 if (!fw) { 10266 rc = -ENXIO; 10267 goto out; 10268 } 10269 image = (struct lpfc_grp_hdr *)fw->data; 10270 10271 INIT_LIST_HEAD(&dma_buffer_list); 10272 if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) || 10273 (bf_get_be32(lpfc_grp_hdr_file_type, image) != 10274 LPFC_FILE_TYPE_GROUP) || 10275 (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) || 10276 (be32_to_cpu(image->size) != fw->size)) { 10277 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10278 "3022 Invalid FW image found. 
" 10279 "Magic:%x Type:%x ID:%x\n", 10280 be32_to_cpu(image->magic_number), 10281 bf_get_be32(lpfc_grp_hdr_file_type, image), 10282 bf_get_be32(lpfc_grp_hdr_id, image)); 10283 rc = -EINVAL; 10284 goto release_out; 10285 } 10286 lpfc_decode_firmware_rev(phba, fwrev, 1); 10287 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { 10288 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10289 "3023 Updating Firmware, Current Version:%s " 10290 "New Version:%s\n", 10291 fwrev, image->revision); 10292 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { 10293 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 10294 GFP_KERNEL); 10295 if (!dmabuf) { 10296 rc = -ENOMEM; 10297 goto release_out; 10298 } 10299 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 10300 SLI4_PAGE_SIZE, 10301 &dmabuf->phys, 10302 GFP_KERNEL); 10303 if (!dmabuf->virt) { 10304 kfree(dmabuf); 10305 rc = -ENOMEM; 10306 goto release_out; 10307 } 10308 list_add_tail(&dmabuf->list, &dma_buffer_list); 10309 } 10310 while (offset < fw->size) { 10311 temp_offset = offset; 10312 list_for_each_entry(dmabuf, &dma_buffer_list, list) { 10313 if (temp_offset + SLI4_PAGE_SIZE > fw->size) { 10314 memcpy(dmabuf->virt, 10315 fw->data + temp_offset, 10316 fw->size - temp_offset); 10317 temp_offset = fw->size; 10318 break; 10319 } 10320 memcpy(dmabuf->virt, fw->data + temp_offset, 10321 SLI4_PAGE_SIZE); 10322 temp_offset += SLI4_PAGE_SIZE; 10323 } 10324 rc = lpfc_wr_object(phba, &dma_buffer_list, 10325 (fw->size - offset), &offset); 10326 if (rc) 10327 goto release_out; 10328 } 10329 rc = offset; 10330 } 10331 10332 release_out: 10333 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { 10334 list_del(&dmabuf->list); 10335 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, 10336 dmabuf->virt, dmabuf->phys); 10337 kfree(dmabuf); 10338 } 10339 release_firmware(fw); 10340 out: 10341 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10342 "3024 Firmware update done: %d.\n", rc); 10343 return; 10344 } 10345 10346 /** 10347 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade 10348 * @phba: pointer to lpfc hba data structure. 10349 * 10350 * This routine is called to perform Linux generic firmware upgrade on device 10351 * that supports such feature. 10352 **/ 10353 int 10354 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade) 10355 { 10356 uint8_t file_name[ELX_MODEL_NAME_SIZE]; 10357 int ret; 10358 const struct firmware *fw; 10359 10360 /* Only supported on SLI4 interface type 2 for now */ 10361 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 10362 LPFC_SLI_INTF_IF_TYPE_2) 10363 return -EPERM; 10364 10365 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName); 10366 10367 if (fw_upgrade == INT_FW_UPGRADE) { 10368 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, 10369 file_name, &phba->pcidev->dev, 10370 GFP_KERNEL, (void *)phba, 10371 lpfc_write_firmware); 10372 } else if (fw_upgrade == RUN_FW_UPGRADE) { 10373 ret = request_firmware(&fw, file_name, &phba->pcidev->dev); 10374 if (!ret) 10375 lpfc_write_firmware(fw, (void *)phba); 10376 } else { 10377 ret = -EINVAL; 10378 } 10379 10380 return ret; 10381 } 10382 10383 /** 10384 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys 10385 * @pdev: pointer to PCI device 10386 * @pid: pointer to PCI device identifier 10387 * 10388 * This routine is called from the kernel's PCI subsystem to device with 10389 * SLI-4 interface spec. 
When an Emulex HBA with SLI-4 interface spec is 10390 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific 10391 * information of the device and driver to see if the driver state that it 10392 * can support this kind of device. If the match is successful, the driver 10393 * core invokes this routine. If this routine determines it can claim the HBA, 10394 * it does all the initialization that it needs to do to handle the HBA 10395 * properly. 10396 * 10397 * Return code 10398 * 0 - driver can claim the device 10399 * negative value - driver can not claim the device 10400 **/ 10401 static int 10402 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) 10403 { 10404 struct lpfc_hba *phba; 10405 struct lpfc_vport *vport = NULL; 10406 struct Scsi_Host *shost = NULL; 10407 int error; 10408 uint32_t cfg_mode, intr_mode; 10409 int adjusted_fcp_io_channel; 10410 10411 /* Allocate memory for HBA structure */ 10412 phba = lpfc_hba_alloc(pdev); 10413 if (!phba) 10414 return -ENOMEM; 10415 10416 /* Perform generic PCI device enabling operation */ 10417 error = lpfc_enable_pci_dev(phba); 10418 if (error) 10419 goto out_free_phba; 10420 10421 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 10422 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 10423 if (error) 10424 goto out_disable_pci_dev; 10425 10426 /* Set up SLI-4 specific device PCI memory space */ 10427 error = lpfc_sli4_pci_mem_setup(phba); 10428 if (error) { 10429 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10430 "1410 Failed to set up pci memory space.\n"); 10431 goto out_disable_pci_dev; 10432 } 10433 10434 /* Set up phase-1 common device driver resources */ 10435 error = lpfc_setup_driver_resource_phase1(phba); 10436 if (error) { 10437 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10438 "1411 Failed to set up driver resource.\n"); 10439 goto out_unset_pci_mem_s4; 10440 } 10441 10442 /* Set up SLI-4 Specific device driver resources */ 10443 error = lpfc_sli4_driver_resource_setup(phba); 10444 if (error) { 10445 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10446 "1412 Failed to set up driver resource.\n"); 10447 goto out_unset_pci_mem_s4; 10448 } 10449 10450 /* Initialize and populate the iocb list per host */ 10451 10452 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10453 "2821 initialize iocb list %d.\n", 10454 phba->cfg_iocb_cnt*1024); 10455 error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024); 10456 10457 if (error) { 10458 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10459 "1413 Failed to initialize iocb list.\n"); 10460 goto out_unset_driver_resource_s4; 10461 } 10462 10463 INIT_LIST_HEAD(&phba->active_rrq_list); 10464 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list); 10465 10466 /* Set up common device driver resources */ 10467 error = lpfc_setup_driver_resource_phase2(phba); 10468 if (error) { 10469 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10470 "1414 Failed to set up driver resource.\n"); 10471 goto out_free_iocb_list; 10472 } 10473 10474 /* Get the default values for Model Name and Description */ 10475 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); 10476 10477 /* Create SCSI host to the physical port */ 10478 error = lpfc_create_shost(phba); 10479 if (error) { 10480 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10481 "1415 Failed to create scsi host.\n"); 10482 goto out_unset_driver_resource; 10483 } 10484 10485 /* Configure sysfs attributes */ 10486 vport = phba->pport; 10487 error = lpfc_alloc_sysfs_attr(vport); 10488 if (error) { 10489 lpfc_printf_log(phba, 
KERN_ERR, LOG_INIT, 10490 "1416 Failed to allocate sysfs attr\n"); 10491 goto out_destroy_shost; 10492 } 10493 10494 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ 10495 /* Now, trying to enable interrupt and bring up the device */ 10496 cfg_mode = phba->cfg_use_msi; 10497 10498 /* Put device to a known state before enabling interrupt */ 10499 lpfc_stop_port(phba); 10500 /* Configure and enable interrupt */ 10501 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); 10502 if (intr_mode == LPFC_INTR_ERROR) { 10503 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10504 "0426 Failed to enable interrupt.\n"); 10505 error = -ENODEV; 10506 goto out_free_sysfs_attr; 10507 } 10508 /* Default to single EQ for non-MSI-X */ 10509 if (phba->intr_type != MSIX) 10510 adjusted_fcp_io_channel = 1; 10511 else 10512 adjusted_fcp_io_channel = phba->cfg_fcp_io_channel; 10513 phba->cfg_fcp_io_channel = adjusted_fcp_io_channel; 10514 /* Set up SLI-4 HBA */ 10515 if (lpfc_sli4_hba_setup(phba)) { 10516 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10517 "1421 Failed to set up hba\n"); 10518 error = -ENODEV; 10519 goto out_disable_intr; 10520 } 10521 10522 /* Log the current active interrupt mode */ 10523 phba->intr_mode = intr_mode; 10524 lpfc_log_intr_mode(phba, intr_mode); 10525 10526 /* Perform post initialization setup */ 10527 lpfc_post_init_setup(phba); 10528 10529 /* check for firmware upgrade or downgrade */ 10530 if (phba->cfg_request_firmware_upgrade) 10531 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE); 10532 10533 /* Check if there are static vports to be created. */ 10534 lpfc_create_static_vport(phba); 10535 return 0; 10536 10537 out_disable_intr: 10538 lpfc_sli4_disable_intr(phba); 10539 out_free_sysfs_attr: 10540 lpfc_free_sysfs_attr(vport); 10541 out_destroy_shost: 10542 lpfc_destroy_shost(phba); 10543 out_unset_driver_resource: 10544 lpfc_unset_driver_resource_phase2(phba); 10545 out_free_iocb_list: 10546 lpfc_free_iocb_list(phba); 10547 out_unset_driver_resource_s4: 10548 lpfc_sli4_driver_resource_unset(phba); 10549 out_unset_pci_mem_s4: 10550 lpfc_sli4_pci_mem_unset(phba); 10551 out_disable_pci_dev: 10552 lpfc_disable_pci_dev(phba); 10553 if (shost) 10554 scsi_host_put(shost); 10555 out_free_phba: 10556 lpfc_hba_free(phba); 10557 return error; 10558 } 10559 10560 /** 10561 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem 10562 * @pdev: pointer to PCI device 10563 * 10564 * This routine is called from the kernel's PCI subsystem to device with 10565 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is 10566 * removed from PCI bus, it performs all the necessary cleanup for the HBA 10567 * device to be removed from the PCI subsystem properly. 
10568 **/ 10569 static void 10570 lpfc_pci_remove_one_s4(struct pci_dev *pdev) 10571 { 10572 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10573 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 10574 struct lpfc_vport **vports; 10575 struct lpfc_hba *phba = vport->phba; 10576 int i; 10577 10578 /* Mark the device unloading flag */ 10579 spin_lock_irq(&phba->hbalock); 10580 vport->load_flag |= FC_UNLOADING; 10581 spin_unlock_irq(&phba->hbalock); 10582 10583 /* Free the HBA sysfs attributes */ 10584 lpfc_free_sysfs_attr(vport); 10585 10586 /* Release all the vports against this physical port */ 10587 vports = lpfc_create_vport_work_array(phba); 10588 if (vports != NULL) 10589 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 10590 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 10591 continue; 10592 fc_vport_terminate(vports[i]->fc_vport); 10593 } 10594 lpfc_destroy_vport_work_array(phba, vports); 10595 10596 /* Remove FC host and then SCSI host with the physical port */ 10597 fc_remove_host(shost); 10598 scsi_remove_host(shost); 10599 10600 /* Perform cleanup on the physical port */ 10601 lpfc_cleanup(vport); 10602 10603 /* 10604 * Bring down the SLI Layer. This step disables all interrupts, 10605 * clears the rings, discards all mailbox commands, and resets 10606 * the HBA FCoE function. 10607 */ 10608 lpfc_debugfs_terminate(vport); 10609 lpfc_sli4_hba_unset(phba); 10610 10611 spin_lock_irq(&phba->hbalock); 10612 list_del_init(&vport->listentry); 10613 spin_unlock_irq(&phba->hbalock); 10614 10615 /* Perform scsi free before driver resource_unset since scsi 10616 * buffers are released to their corresponding pools here. 10617 */ 10618 lpfc_scsi_free(phba); 10619 10620 lpfc_sli4_driver_resource_unset(phba); 10621 10622 /* Unmap adapter Control and Doorbell registers */ 10623 lpfc_sli4_pci_mem_unset(phba); 10624 10625 /* Release PCI resources and disable device's PCI function */ 10626 scsi_host_put(shost); 10627 lpfc_disable_pci_dev(phba); 10628 10629 /* Finally, free the driver's device data structure */ 10630 lpfc_hba_free(phba); 10631 10632 return; 10633 } 10634 10635 /** 10636 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt 10637 * @pdev: pointer to PCI device 10638 * @msg: power management message 10639 * 10640 * This routine is called from the kernel's PCI subsystem to support system 10641 * Power Management (PM) for a device with SLI-4 interface spec. When PM invokes 10642 * this method, it quiesces the device by stopping the driver's worker 10643 * thread for the device, turning off the device's interrupt and DMA, and bringing 10644 * the device offline. Note that as the driver implements the minimum PM 10645 * requirements to a power-aware driver's PM support for suspend/resume -- all 10646 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() 10647 * method call will be treated as SUSPEND and the driver will fully 10648 * reinitialize its device during the resume() method call, the driver will set 10649 * the device to the PCI_D3hot state in PCI config space instead of setting it 10650 * according to the @msg provided by the PM.
10651 * 10652 * Return code 10653 * 0 - driver suspended the device 10654 * Error otherwise 10655 **/ 10656 static int 10657 lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg) 10658 { 10659 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10660 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10661 10662 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10663 "2843 PCI device Power Management suspend.\n"); 10664 10665 /* Bring down the device */ 10666 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 10667 lpfc_offline(phba); 10668 kthread_stop(phba->worker_thread); 10669 10670 /* Disable interrupt from device */ 10671 lpfc_sli4_disable_intr(phba); 10672 lpfc_sli4_queue_destroy(phba); 10673 10674 /* Save device state to PCI config space */ 10675 pci_save_state(pdev); 10676 pci_set_power_state(pdev, PCI_D3hot); 10677 10678 return 0; 10679 } 10680 10681 /** 10682 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt 10683 * @pdev: pointer to PCI device 10684 * 10685 * This routine is called from the kernel's PCI subsystem to support system 10686 * Power Management (PM) for a device with SLI-4 interface spec. When PM invokes 10687 * this method, it restores the device's PCI config space state and fully 10688 * reinitializes the device and brings it online. Note that as the driver 10689 * implements the minimum PM requirements to a power-aware driver's PM for 10690 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) 10691 * to the suspend() method call will be treated as SUSPEND and the driver 10692 * will fully reinitialize its device during the resume() method call, the device 10693 * will be set to PCI_D0 directly in PCI config space before restoring the 10694 * state. 10695 * 10696 * Return code 10697 * 0 - driver resumed the device 10698 * Error otherwise 10699 **/ 10700 static int 10701 lpfc_pci_resume_one_s4(struct pci_dev *pdev) 10702 { 10703 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10704 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10705 uint32_t intr_mode; 10706 int error; 10707 10708 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10709 "0292 PCI device Power Management resume.\n"); 10710 10711 /* Restore device state from PCI config space */ 10712 pci_set_power_state(pdev, PCI_D0); 10713 pci_restore_state(pdev); 10714 10715 /* 10716 * As the new kernel behavior of pci_restore_state() API call clears 10717 * device saved_state flag, need to save the restored state again. 10718 */ 10719 pci_save_state(pdev); 10720 10721 if (pdev->is_busmaster) 10722 pci_set_master(pdev); 10723 10724 /* Startup the kernel thread for this host adapter.
*/ 10725 phba->worker_thread = kthread_run(lpfc_do_work, phba, 10726 "lpfc_worker_%d", phba->brd_no); 10727 if (IS_ERR(phba->worker_thread)) { 10728 error = PTR_ERR(phba->worker_thread); 10729 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10730 "0293 PM resume failed to start worker " 10731 "thread: error=x%x.\n", error); 10732 return error; 10733 } 10734 10735 /* Configure and enable interrupt */ 10736 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 10737 if (intr_mode == LPFC_INTR_ERROR) { 10738 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10739 "0294 PM resume Failed to enable interrupt\n"); 10740 return -EIO; 10741 } else 10742 phba->intr_mode = intr_mode; 10743 10744 /* Restart HBA and bring it online */ 10745 lpfc_sli_brdrestart(phba); 10746 lpfc_online(phba); 10747 10748 /* Log the current active interrupt mode */ 10749 lpfc_log_intr_mode(phba, phba->intr_mode); 10750 10751 return 0; 10752 } 10753 10754 /** 10755 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover 10756 * @phba: pointer to lpfc hba data structure. 10757 * 10758 * This routine is called to prepare the SLI4 device for PCI slot recover. It 10759 * aborts all the outstanding SCSI I/Os to the pci device. 10760 **/ 10761 static void 10762 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) 10763 { 10764 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10765 "2828 PCI channel I/O abort preparing for recovery\n"); 10766 /* 10767 * There may be errored I/Os through HBA, abort all I/Os on txcmplq 10768 * and let the SCSI mid-layer to retry them to recover. 10769 */ 10770 lpfc_sli_abort_fcp_rings(phba); 10771 } 10772 10773 /** 10774 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset 10775 * @phba: pointer to lpfc hba data structure. 10776 * 10777 * This routine is called to prepare the SLI4 device for PCI slot reset. It 10778 * disables the device interrupt and pci device, and aborts the internal FCP 10779 * pending I/Os. 10780 **/ 10781 static void 10782 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) 10783 { 10784 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10785 "2826 PCI channel disable preparing for reset\n"); 10786 10787 /* Block any management I/Os to the device */ 10788 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT); 10789 10790 /* Block all SCSI devices' I/Os on the host */ 10791 lpfc_scsi_dev_block(phba); 10792 10793 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 10794 lpfc_sli_flush_fcp_rings(phba); 10795 10796 /* stop all timers */ 10797 lpfc_stop_hba_timers(phba); 10798 10799 /* Disable interrupt and pci device */ 10800 lpfc_sli4_disable_intr(phba); 10801 lpfc_sli4_queue_destroy(phba); 10802 pci_disable_device(phba->pcidev); 10803 } 10804 10805 /** 10806 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable 10807 * @phba: pointer to lpfc hba data structure. 10808 * 10809 * This routine is called to prepare the SLI4 device for PCI slot permanently 10810 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP 10811 * pending I/Os. 
10812 **/ 10813 static void 10814 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) 10815 { 10816 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10817 "2827 PCI channel permanent disable for failure\n"); 10818 10819 /* Block all SCSI devices' I/Os on the host */ 10820 lpfc_scsi_dev_block(phba); 10821 10822 /* stop all timers */ 10823 lpfc_stop_hba_timers(phba); 10824 10825 /* Clean up all driver's outstanding SCSI I/Os */ 10826 lpfc_sli_flush_fcp_rings(phba); 10827 } 10828 10829 /** 10830 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device 10831 * @pdev: pointer to PCI device. 10832 * @state: the current PCI connection state. 10833 * 10834 * This routine is called from the PCI subsystem for error handling to a device 10835 * with SLI-4 interface spec. This function is called by the PCI subsystem 10836 * after a PCI bus error affecting this device has been detected. When this 10837 * function is invoked, it will need to stop all the I/Os and interrupt(s) 10838 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET 10839 * for the PCI subsystem to perform proper recovery as desired. 10840 * 10841 * Return codes 10842 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 10843 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 10844 **/ 10845 static pci_ers_result_t 10846 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) 10847 { 10848 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10849 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10850 10851 switch (state) { 10852 case pci_channel_io_normal: 10853 /* Non-fatal error, prepare for recovery */ 10854 lpfc_sli4_prep_dev_for_recover(phba); 10855 return PCI_ERS_RESULT_CAN_RECOVER; 10856 case pci_channel_io_frozen: 10857 /* Fatal error, prepare for slot reset */ 10858 lpfc_sli4_prep_dev_for_reset(phba); 10859 return PCI_ERS_RESULT_NEED_RESET; 10860 case pci_channel_io_perm_failure: 10861 /* Permanent failure, prepare for device down */ 10862 lpfc_sli4_prep_dev_for_perm_failure(phba); 10863 return PCI_ERS_RESULT_DISCONNECT; 10864 default: 10865 /* Unknown state, prepare and request slot reset */ 10866 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10867 "2825 Unknown PCI error state: x%x\n", state); 10868 lpfc_sli4_prep_dev_for_reset(phba); 10869 return PCI_ERS_RESULT_NEED_RESET; 10870 } 10871 } 10872 10873 /** 10874 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch 10875 * @pdev: pointer to PCI device. 10876 * 10877 * This routine is called from the PCI subsystem for error handling to a device 10878 * with SLI-4 interface spec. It is called after the PCI bus has been reset to 10879 * restart the PCI card from scratch, as if from a cold-boot. During the 10880 * PCI subsystem error recovery, after the driver returns 10881 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error 10882 * recovery and then call this routine before calling the .resume method to 10883 * recover the device. This function will initialize the HBA device and enable 10884 * the interrupt, but it will just put the HBA into an offline state without 10885 * passing any I/O traffic.
10886 * 10887 * Return codes 10888 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 10889 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 10890 */ 10891 static pci_ers_result_t 10892 lpfc_io_slot_reset_s4(struct pci_dev *pdev) 10893 { 10894 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10895 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10896 struct lpfc_sli *psli = &phba->sli; 10897 uint32_t intr_mode; 10898 10899 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); 10900 if (pci_enable_device_mem(pdev)) { 10901 printk(KERN_ERR "lpfc: Cannot re-enable " 10902 "PCI device after reset.\n"); 10903 return PCI_ERS_RESULT_DISCONNECT; 10904 } 10905 10906 pci_restore_state(pdev); 10907 10908 /* 10909 * As the new kernel behavior of pci_restore_state() API call clears 10910 * device saved_state flag, need to save the restored state again. 10911 */ 10912 pci_save_state(pdev); 10913 10914 if (pdev->is_busmaster) 10915 pci_set_master(pdev); 10916 10917 spin_lock_irq(&phba->hbalock); 10918 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 10919 spin_unlock_irq(&phba->hbalock); 10920 10921 /* Configure and enable interrupt */ 10922 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); 10923 if (intr_mode == LPFC_INTR_ERROR) { 10924 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10925 "2824 Cannot re-enable interrupt after " 10926 "slot reset.\n"); 10927 return PCI_ERS_RESULT_DISCONNECT; 10928 } else 10929 phba->intr_mode = intr_mode; 10930 10931 /* Log the current active interrupt mode */ 10932 lpfc_log_intr_mode(phba, phba->intr_mode); 10933 10934 return PCI_ERS_RESULT_RECOVERED; 10935 } 10936 10937 /** 10938 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device 10939 * @pdev: pointer to PCI device 10940 * 10941 * This routine is called from the PCI subsystem for error handling to device 10942 * with SLI-4 interface spec. It is called when kernel error recovery tells 10943 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus 10944 * error recovery. After this call, traffic can start to flow from this device 10945 * again. 10946 **/ 10947 static void 10948 lpfc_io_resume_s4(struct pci_dev *pdev) 10949 { 10950 struct Scsi_Host *shost = pci_get_drvdata(pdev); 10951 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 10952 10953 /* 10954 * In case of slot reset, as function reset is performed through 10955 * mailbox command which needs DMA to be enabled, this operation 10956 * has to be moved to the io resume phase. Taking device offline 10957 * will perform the necessary cleanup. 10958 */ 10959 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { 10960 /* Perform device reset */ 10961 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 10962 lpfc_offline(phba); 10963 lpfc_sli_brdrestart(phba); 10964 /* Bring the device back online */ 10965 lpfc_online(phba); 10966 } 10967 10968 /* Clean up Advanced Error Reporting (AER) if needed */ 10969 if (phba->hba_flag & HBA_AER_ENABLED) 10970 pci_cleanup_aer_uncorrect_error_status(pdev); 10971 } 10972 10973 /** 10974 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem 10975 * @pdev: pointer to PCI device 10976 * @pid: pointer to PCI device identifier 10977 * 10978 * This routine is to be registered to the kernel's PCI subsystem. 
When an 10979 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks 10980 * at PCI device-specific information of the device and driver to see if the 10981 * driver states that it can support this kind of device. If the match is 10982 * successful, the driver core invokes this routine. This routine dispatches 10983 * the action to the proper SLI-3 or SLI-4 device probing routine, which will 10984 * do all the initialization that it needs to do to handle the HBA device 10985 * properly. 10986 * 10987 * Return code 10988 * 0 - driver can claim the device 10989 * negative value - driver cannot claim the device 10990 **/ 10991 static int 10992 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 10993 { 10994 int rc; 10995 struct lpfc_sli_intf intf; 10996 10997 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0)) 10998 return -ENODEV; 10999 11000 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) && 11001 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4)) 11002 rc = lpfc_pci_probe_one_s4(pdev, pid); 11003 else 11004 rc = lpfc_pci_probe_one_s3(pdev, pid); 11005 11006 return rc; 11007 } 11008 11009 /** 11010 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem 11011 * @pdev: pointer to PCI device 11012 * 11013 * This routine is to be registered to the kernel's PCI subsystem. When an 11014 * Emulex HBA is removed from PCI bus, the driver core invokes this routine. 11015 * This routine dispatches the action to the proper SLI-3 or SLI-4 device 11016 * remove routine, which will perform all the necessary cleanup for the 11017 * device to be removed from the PCI subsystem properly. 11018 **/ 11019 static void 11020 lpfc_pci_remove_one(struct pci_dev *pdev) 11021 { 11022 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11023 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11024 11025 switch (phba->pci_dev_grp) { 11026 case LPFC_PCI_DEV_LP: 11027 lpfc_pci_remove_one_s3(pdev); 11028 break; 11029 case LPFC_PCI_DEV_OC: 11030 lpfc_pci_remove_one_s4(pdev); 11031 break; 11032 default: 11033 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11034 "1424 Invalid PCI device group: 0x%x\n", 11035 phba->pci_dev_grp); 11036 break; 11037 } 11038 return; 11039 } 11040 11041 /** 11042 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management 11043 * @pdev: pointer to PCI device 11044 * @msg: power management message 11045 * 11046 * This routine is to be registered to the kernel's PCI subsystem to support 11047 * system Power Management (PM). When PM invokes this method, it dispatches 11048 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will 11049 * suspend the device.
11050 * 11051 * Return code 11052 * 0 - driver suspended the device 11053 * Error otherwise 11054 **/ 11055 static int 11056 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) 11057 { 11058 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11059 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11060 int rc = -ENODEV; 11061 11062 switch (phba->pci_dev_grp) { 11063 case LPFC_PCI_DEV_LP: 11064 rc = lpfc_pci_suspend_one_s3(pdev, msg); 11065 break; 11066 case LPFC_PCI_DEV_OC: 11067 rc = lpfc_pci_suspend_one_s4(pdev, msg); 11068 break; 11069 default: 11070 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11071 "1425 Invalid PCI device group: 0x%x\n", 11072 phba->pci_dev_grp); 11073 break; 11074 } 11075 return rc; 11076 } 11077 11078 /** 11079 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management 11080 * @pdev: pointer to PCI device 11081 * 11082 * This routine is to be registered to the kernel's PCI subsystem to support 11083 * system Power Management (PM). When PM invokes this method, it dispatches 11084 * the action to the proper SLI-3 or SLI-4 device resume routine, which will 11085 * resume the device. 11086 * 11087 * Return code 11088 * 0 - driver resumed the device 11089 * Error otherwise 11090 **/ 11091 static int 11092 lpfc_pci_resume_one(struct pci_dev *pdev) 11093 { 11094 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11095 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11096 int rc = -ENODEV; 11097 11098 switch (phba->pci_dev_grp) { 11099 case LPFC_PCI_DEV_LP: 11100 rc = lpfc_pci_resume_one_s3(pdev); 11101 break; 11102 case LPFC_PCI_DEV_OC: 11103 rc = lpfc_pci_resume_one_s4(pdev); 11104 break; 11105 default: 11106 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11107 "1426 Invalid PCI device group: 0x%x\n", 11108 phba->pci_dev_grp); 11109 break; 11110 } 11111 return rc; 11112 } 11113 11114 /** 11115 * lpfc_io_error_detected - lpfc method for handling PCI I/O error 11116 * @pdev: pointer to PCI device. 11117 * @state: the current PCI connection state. 11118 * 11119 * This routine is registered to the PCI subsystem for error handling. This 11120 * function is called by the PCI subsystem after a PCI bus error affecting 11121 * this device has been detected. When this routine is invoked, it dispatches 11122 * the action to the proper SLI-3 or SLI-4 device error detected handling 11123 * routine, which will perform the proper error detected operation. 11124 * 11125 * Return codes 11126 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery 11127 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 11128 **/ 11129 static pci_ers_result_t 11130 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 11131 { 11132 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11133 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11134 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 11135 11136 switch (phba->pci_dev_grp) { 11137 case LPFC_PCI_DEV_LP: 11138 rc = lpfc_io_error_detected_s3(pdev, state); 11139 break; 11140 case LPFC_PCI_DEV_OC: 11141 rc = lpfc_io_error_detected_s4(pdev, state); 11142 break; 11143 default: 11144 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11145 "1427 Invalid PCI device group: 0x%x\n", 11146 phba->pci_dev_grp); 11147 break; 11148 } 11149 return rc; 11150 } 11151 11152 /** 11153 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch 11154 * @pdev: pointer to PCI device. 11155 * 11156 * This routine is registered to the PCI subsystem for error handling.
This 11157 * function is called after PCI bus has been reset to restart the PCI card 11158 * from scratch, as if from a cold-boot. When this routine is invoked, it 11159 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling 11160 * routine, which will perform the proper device reset. 11161 * 11162 * Return codes 11163 * PCI_ERS_RESULT_RECOVERED - the device has been recovered 11164 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered 11165 **/ 11166 static pci_ers_result_t 11167 lpfc_io_slot_reset(struct pci_dev *pdev) 11168 { 11169 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11170 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11171 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; 11172 11173 switch (phba->pci_dev_grp) { 11174 case LPFC_PCI_DEV_LP: 11175 rc = lpfc_io_slot_reset_s3(pdev); 11176 break; 11177 case LPFC_PCI_DEV_OC: 11178 rc = lpfc_io_slot_reset_s4(pdev); 11179 break; 11180 default: 11181 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11182 "1428 Invalid PCI device group: 0x%x\n", 11183 phba->pci_dev_grp); 11184 break; 11185 } 11186 return rc; 11187 } 11188 11189 /** 11190 * lpfc_io_resume - lpfc method for resuming PCI I/O operation 11191 * @pdev: pointer to PCI device 11192 * 11193 * This routine is registered to the PCI subsystem for error handling. It 11194 * is called when kernel error recovery tells the lpfc driver that it is 11195 * OK to resume normal PCI operation after PCI bus error recovery. When 11196 * this routine is invoked, it dispatches the action to the proper SLI-3 11197 * or SLI-4 device io_resume routine, which will resume the device operation. 11198 **/ 11199 static void 11200 lpfc_io_resume(struct pci_dev *pdev) 11201 { 11202 struct Scsi_Host *shost = pci_get_drvdata(pdev); 11203 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; 11204 11205 switch (phba->pci_dev_grp) { 11206 case LPFC_PCI_DEV_LP: 11207 lpfc_io_resume_s3(pdev); 11208 break; 11209 case LPFC_PCI_DEV_OC: 11210 lpfc_io_resume_s4(pdev); 11211 break; 11212 default: 11213 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11214 "1429 Invalid PCI device group: 0x%x\n", 11215 phba->pci_dev_grp); 11216 break; 11217 } 11218 return; 11219 } 11220 11221 /** 11222 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter 11223 * @phba: pointer to lpfc hba data structure. 11224 * 11225 * This routine checks to see if OAS is supported for this adapter. If 11226 * supported, the configure Flash Optimized Fabric flag is set. Otherwise, 11227 * the enable oas flag is cleared and the pool created for OAS device data 11228 * is destroyed. 11229 * 11230 **/ 11231 void 11232 lpfc_sli4_oas_verify(struct lpfc_hba *phba) 11233 { 11234 11235 if (!phba->cfg_EnableXLane) 11236 return; 11237 11238 if (phba->sli4_hba.pc_sli4_params.oas_supported) { 11239 phba->cfg_fof = 1; 11240 } else { 11241 phba->cfg_fof = 0; 11242 if (phba->device_data_mem_pool) 11243 mempool_destroy(phba->device_data_mem_pool); 11244 phba->device_data_mem_pool = NULL; 11245 } 11246 11247 return; 11248 } 11249 11250 /** 11251 * lpfc_fof_queue_setup - Set up all the fof queues 11252 * @phba: pointer to lpfc hba data structure. 11253 * 11254 * This routine is invoked to set up all the fof queues for the FC HBA 11255 * operation. 
11256 * 11257 * Return codes 11258 * 0 - successful 11259 * -ENOMEM - No available memory 11260 **/ 11261 int 11262 lpfc_fof_queue_setup(struct lpfc_hba *phba) 11263 { 11264 struct lpfc_sli *psli = &phba->sli; 11265 int rc; 11266 11267 rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX); 11268 if (rc) 11269 return -ENOMEM; 11270 11271 if (phba->cfg_fof) { 11272 11273 rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq, 11274 phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP); 11275 if (rc) 11276 goto out_oas_cq; 11277 11278 rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq, 11279 phba->sli4_hba.oas_cq, LPFC_FCP); 11280 if (rc) 11281 goto out_oas_wq; 11282 11283 phba->sli4_hba.oas_cq->pring = &psli->ring[LPFC_FCP_OAS_RING]; 11284 phba->sli4_hba.oas_ring = &psli->ring[LPFC_FCP_OAS_RING]; 11285 } 11286 11287 return 0; 11288 11289 out_oas_wq: 11290 lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq); 11291 out_oas_cq: 11292 lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq); 11293 return rc; 11294 11295 } 11296 11297 /** 11298 * lpfc_fof_queue_create - Create all the fof queues 11299 * @phba: pointer to lpfc hba data structure. 11300 * 11301 * This routine is invoked to allocate all the fof queues for the FC HBA 11302 * operation. For each SLI4 queue type, the parameters such as queue entry 11303 * count (queue depth) shall be taken from the module parameter. For now, 11304 * we just use some constant number as a placeholder. 11305 * 11306 * Return codes 11307 * 0 - successful 11308 * -ENOMEM - No available memory 11309 * -EIO - The mailbox failed to complete successfully. 11310 **/ 11311 int 11312 lpfc_fof_queue_create(struct lpfc_hba *phba) 11313 { 11314 struct lpfc_queue *qdesc; 11315 11316 /* Create FOF EQ */ 11317 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 11318 phba->sli4_hba.eq_ecount); 11319 if (!qdesc) 11320 goto out_error; 11321 11322 phba->sli4_hba.fof_eq = qdesc; 11323 11324 if (phba->cfg_fof) { 11325 11326 /* Create OAS CQ */ 11327 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 11328 phba->sli4_hba.cq_ecount); 11329 if (!qdesc) 11330 goto out_error; 11331 11332 phba->sli4_hba.oas_cq = qdesc; 11333 11334 /* Create OAS WQ */ 11335 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 11336 phba->sli4_hba.wq_ecount); 11337 if (!qdesc) 11338 goto out_error; 11339 11340 phba->sli4_hba.oas_wq = qdesc; 11341 11342 } 11343 return 0; 11344 11345 out_error: 11346 lpfc_fof_queue_destroy(phba); 11347 return -ENOMEM; 11348 } 11349 11350 /** 11351 * lpfc_fof_queue_destroy - Destroy all the fof queues 11352 * @phba: pointer to lpfc hba data structure. 11353 * 11354 * This routine is invoked to release all the SLI4 queues associated with the FC HBA 11355 * operation.
11356 * 11357 * Return codes 11358 * 0 - successful 11359 **/ 11360 int 11361 lpfc_fof_queue_destroy(struct lpfc_hba *phba) 11362 { 11363 /* Release FOF Event queue */ 11364 if (phba->sli4_hba.fof_eq != NULL) { 11365 lpfc_sli4_queue_free(phba->sli4_hba.fof_eq); 11366 phba->sli4_hba.fof_eq = NULL; 11367 } 11368 11369 /* Release OAS Completion queue */ 11370 if (phba->sli4_hba.oas_cq != NULL) { 11371 lpfc_sli4_queue_free(phba->sli4_hba.oas_cq); 11372 phba->sli4_hba.oas_cq = NULL; 11373 } 11374 11375 /* Release OAS Work queue */ 11376 if (phba->sli4_hba.oas_wq != NULL) { 11377 lpfc_sli4_queue_free(phba->sli4_hba.oas_wq); 11378 phba->sli4_hba.oas_wq = NULL; 11379 } 11380 return 0; 11381 } 11382 11383 MODULE_DEVICE_TABLE(pci, lpfc_id_table); 11384 11385 static const struct pci_error_handlers lpfc_err_handler = { 11386 .error_detected = lpfc_io_error_detected, 11387 .slot_reset = lpfc_io_slot_reset, 11388 .resume = lpfc_io_resume, 11389 }; 11390 11391 static struct pci_driver lpfc_driver = { 11392 .name = LPFC_DRIVER_NAME, 11393 .id_table = lpfc_id_table, 11394 .probe = lpfc_pci_probe_one, 11395 .remove = lpfc_pci_remove_one, 11396 .suspend = lpfc_pci_suspend_one, 11397 .resume = lpfc_pci_resume_one, 11398 .err_handler = &lpfc_err_handler, 11399 }; 11400 11401 static const struct file_operations lpfc_mgmt_fop = { 11402 .owner = THIS_MODULE, 11403 }; 11404 11405 static struct miscdevice lpfc_mgmt_dev = { 11406 .minor = MISC_DYNAMIC_MINOR, 11407 .name = "lpfcmgmt", 11408 .fops = &lpfc_mgmt_fop, 11409 }; 11410 11411 /** 11412 * lpfc_init - lpfc module initialization routine 11413 * 11414 * This routine is to be invoked when the lpfc module is loaded into the 11415 * kernel. The special kernel macro module_init() is used to indicate the 11416 * role of this routine to the kernel as lpfc module entry point. 11417 * 11418 * Return codes 11419 * 0 - successful 11420 * -ENOMEM - FC attach transport failed 11421 * all others - failed 11422 */ 11423 static int __init 11424 lpfc_init(void) 11425 { 11426 int cpu; 11427 int error = 0; 11428 11429 printk(LPFC_MODULE_DESC "\n"); 11430 printk(LPFC_COPYRIGHT "\n"); 11431 11432 error = misc_register(&lpfc_mgmt_dev); 11433 if (error) 11434 printk(KERN_ERR "Could not register lpfcmgmt device, " 11435 "misc_register returned with status %d", error); 11436 11437 lpfc_transport_functions.vport_create = lpfc_vport_create; 11438 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 11439 lpfc_transport_template = 11440 fc_attach_transport(&lpfc_transport_functions); 11441 if (lpfc_transport_template == NULL) 11442 return -ENOMEM; 11443 lpfc_vport_transport_template = 11444 fc_attach_transport(&lpfc_vport_transport_functions); 11445 if (lpfc_vport_transport_template == NULL) { 11446 fc_release_transport(lpfc_transport_template); 11447 return -ENOMEM; 11448 } 11449 11450 /* Initialize in case vector mapping is needed */ 11451 lpfc_used_cpu = NULL; 11452 lpfc_present_cpu = 0; 11453 for_each_present_cpu(cpu) 11454 lpfc_present_cpu++; 11455 11456 error = pci_register_driver(&lpfc_driver); 11457 if (error) { 11458 fc_release_transport(lpfc_transport_template); 11459 fc_release_transport(lpfc_vport_transport_template); 11460 } 11461 11462 return error; 11463 } 11464 11465 /** 11466 * lpfc_exit - lpfc module removal routine 11467 * 11468 * This routine is invoked when the lpfc module is removed from the kernel. 11469 * The special kernel macro module_exit() is used to indicate the role of 11470 * this routine to the kernel as lpfc module exit point. 
11471 */ 11472 static void __exit 11473 lpfc_exit(void) 11474 { 11475 misc_deregister(&lpfc_mgmt_dev); 11476 pci_unregister_driver(&lpfc_driver); 11477 fc_release_transport(lpfc_transport_template); 11478 fc_release_transport(lpfc_vport_transport_template); 11479 if (_dump_buf_data) { 11480 printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for " 11481 "_dump_buf_data at 0x%p\n", 11482 (1L << _dump_buf_data_order), _dump_buf_data); 11483 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order); 11484 } 11485 11486 if (_dump_buf_dif) { 11487 printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for " 11488 "_dump_buf_dif at 0x%p\n", 11489 (1L << _dump_buf_dif_order), _dump_buf_dif); 11490 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); 11491 } 11492 kfree(lpfc_used_cpu); 11493 idr_destroy(&lpfc_hba_index); 11494 } 11495 11496 module_init(lpfc_init); 11497 module_exit(lpfc_exit); 11498 MODULE_LICENSE("GPL"); 11499 MODULE_DESCRIPTION(LPFC_MODULE_DESC); 11500 MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com"); 11501 MODULE_VERSION("0:" LPFC_DRIVER_VERSION); 11502
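/*
 * Illustrative note: the three handlers wired into lpfc_err_handler above
 * take part in the kernel's PCI error-recovery (AER) sequence. For a frozen
 * channel on an SLI-4 HBA the flow is roughly as follows; the exact callback
 * ordering is driven by the PCI core, so treat this as a sketch rather than
 * a guarantee:
 *
 *   lpfc_io_error_detected(pdev, pci_channel_io_frozen)
 *     -> lpfc_io_error_detected_s4()  stops I/O and interrupts, returns
 *                                     PCI_ERS_RESULT_NEED_RESET
 *   lpfc_io_slot_reset(pdev)
 *     -> lpfc_io_slot_reset_s4()      re-enables the device and interrupts,
 *                                     returns PCI_ERS_RESULT_RECOVERED
 *   lpfc_io_resume(pdev)
 *     -> lpfc_io_resume_s4()          restarts the HBA and brings it back
 *                                     online
 *
 * The *_s3/*_s4 dispatch inside each wrapper is keyed off phba->pci_dev_grp,
 * which is established at probe time when lpfc_pci_probe_one() reads the
 * LPFC_SLI_INTF register to choose between lpfc_pci_probe_one_s3() and
 * lpfc_pci_probe_one_s4().
 */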